Diffstat (limited to 'kernel')
 kernel/sched.c    |  89
 kernel/sched_rt.c | 324
 2 files changed, 302 insertions(+), 111 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8c12f3..dd1a1466c1e6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -464,11 +464,15 @@ struct rt_rq {
 	struct rt_prio_array active;
 	unsigned long rt_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	int highest_prio; /* highest queued rt task prio */
+	struct {
+		int curr; /* highest queued rt task prio */
+		int next; /* next highest */
+	} highest_prio;
 #endif
 #ifdef CONFIG_SMP
 	unsigned long rt_nr_migratory;
 	int overloaded;
+	struct plist_head pushable_tasks;
 #endif
 	int rt_throttled;
 	u64 rt_time;
@@ -1607,21 +1611,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+#ifdef CONFIG_PREEMPT
+
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ * fair double_lock_balance: Safely acquires both rq->locks in a fair
+ * way at the expense of forcing extra atomic operations in all
+ * invocations.  This assures that the double_lock is acquired using the
+ * same underlying policy as the spinlock_t on this architecture, which
+ * reduces latency compared to the unfair variant below.  However, it
+ * also adds more overhead and therefore may reduce throughput.
  */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	spin_unlock(&this_rq->lock);
+	double_rq_lock(this_rq, busiest);
+
+	return 1;
+}
+
+#else
+/*
+ * Unfair double_lock_balance: Optimizes throughput at the expense of
+ * latency by eliminating extra atomic operations when the locks are
+ * already in proper order on entry.  This favors lower cpu-ids and will
+ * grant the double lock to lower cpus over higher ids under contention,
+ * regardless of entry order into the function.
+ */
+static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(this_rq->lock)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
 	int ret = 0;
 
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
 	if (unlikely(!spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
@@ -1634,6 +1659,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+{
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+
+	return _double_lock_balance(this_rq, busiest);
+}
+
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
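The trylock-then-reorder fallback of the unfair variant is easier to see outside the kernel. Below is a minimal userspace sketch of the same policy using pthreads; the function name and the caller contract are illustrative, not kernel API.

#include <pthread.h>

/*
 * Sketch only: acquire 'other' while already holding 'mine'.  On
 * contention, fall back to taking both locks in a fixed (address)
 * order so that two threads doing this concurrently cannot deadlock.
 * Returns 1 if 'mine' was temporarily released, in which case the
 * caller must revalidate any state derived while holding it.
 */
static int double_lock(pthread_mutex_t *mine, pthread_mutex_t *other)
{
	int ret = 0;

	if (pthread_mutex_trylock(other) != 0) {
		if (other < mine) {
			/* wrong order: back off, then lock low -> high */
			pthread_mutex_unlock(mine);
			pthread_mutex_lock(other);
			pthread_mutex_lock(mine);
			ret = 1;
		} else {
			/* already in order: a plain blocking lock is safe */
			pthread_mutex_lock(other);
		}
	}

	return ret;
}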
@@ -2445,6 +2486,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
+	plist_node_init(&p->pushable_tasks, MAX_PRIO);
+
 	put_cpu();
 }
 
@@ -2585,6 +2628,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 {
 	struct mm_struct *mm = rq->prev_mm;
 	long prev_state;
+#ifdef CONFIG_SMP
+	int post_schedule = 0;
+
+	if (current->sched_class->needs_post_schedule)
+		post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
 
 	rq->prev_mm = NULL;
 
@@ -2603,7 +2652,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
-	if (current->sched_class->post_schedule)
+	if (post_schedule)
 		current->sched_class->post_schedule(rq);
 #endif
 
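These two hunks exist because the runqueue state is only trustworthy at the top of finish_task_switch(): the decision is sampled into a local before locks can be dropped, and the expensive hook runs later only if the snapshot said so. A compile-only sketch of this predicate/hook split, with hypothetical type names:

struct rq;					/* opaque here */

struct sched_hooks {
	int  (*needs_post_schedule)(struct rq *rq);	/* cheap predicate */
	void (*post_schedule)(struct rq *rq);		/* expensive body */
};

static void finish_switch(struct rq *rq, const struct sched_hooks *h)
{
	int post_schedule = 0;

	/* sample while the runqueue state still reflects this switch */
	if (h->needs_post_schedule)
		post_schedule = h->needs_post_schedule(rq);

	/* ... locks may be dropped and retaken in between ... */

	if (post_schedule)
		h->post_schedule(rq);
}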
@@ -2984,6 +3033,16 @@ next:
 	pulled++;
 	rem_load_move -= p->se.load.weight;
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * NEWIDLE balancing is a source of latency, so preemptible kernels
+	 * will stop after the first task is pulled to minimize the critical
+	 * section.
+	 */
+	if (idle == CPU_NEWLY_IDLE)
+		goto out;
+#endif
+
 	/*
 	 * We only want to steal up to the prescribed amount of weighted load.
 	 */
@@ -3030,9 +3089,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 				sd, idle, all_pinned, &this_best_prio);
 		class = class->next;
 
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
-
+#endif
 	} while (class && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
@@ -8201,11 +8266,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	rt_rq->highest_prio = MAX_RT_PRIO;
+	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+	rt_rq->highest_prio.next = MAX_RT_PRIO;
 #endif
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
+	plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
 #endif
 
 	rt_rq->rt_time = 0;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b796..18c7b5b3158a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -49,6 +49,24 @@ static void update_rt_migration(struct rq *rq)
 		rq->rt.overloaded = 0;
 	}
 }
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	plist_node_init(&p->pushable_tasks, p->prio);
+	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+#else
+
+#define enqueue_pushable_task(rq, p) do { } while (0)
+#define dequeue_pushable_task(rq, p) do { } while (0)
+
 #endif /* CONFIG_SMP */
 
 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
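enqueue_pushable_task() deletes before re-initializing so that a task whose priority changed is re-sorted rather than double-linked, and the invariant is that the list head is always the highest-priority pushable task. A self-contained userspace sketch of those semantics, with a plain O(N) sorted insert standing in for the kernel's <linux/plist.h>:

#include <stdio.h>

struct pnode {
	int prio;		/* lower value == higher RT priority */
	struct pnode *next;
};

/* insert in sorted position: mirrors enqueue_pushable_task() */
static void plist_push(struct pnode **head, struct pnode *n)
{
	while (*head && (*head)->prio <= n->prio)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
}

/* head is the best push candidate: cf. pick_next_pushable_task() */
static struct pnode *plist_pick(struct pnode *head)
{
	return head;		/* NULL means nothing is pushable */
}

int main(void)
{
	struct pnode a = { 50, NULL }, b = { 20, NULL }, c = { 70, NULL };
	struct pnode *head = NULL;

	plist_push(&head, &a);
	plist_push(&head, &b);
	plist_push(&head, &c);
	printf("best pushable prio: %d\n", plist_pick(head)->prio); /* 20 */
	return 0;
}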
@@ -108,7 +126,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se);
-		if (rt_rq->highest_prio < curr->prio)
+		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
 }
@@ -473,7 +491,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 	if (rt_rq)
-		return rt_rq->highest_prio;
+		return rt_rq->highest_prio.curr;
 #endif
 
 	return rt_task_of(rt_se)->prio;
@@ -547,33 +565,64 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
+{
+	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+	if (next && rt_prio(next->prio))
+		return next->prio;
+	else
+		return MAX_RT_PRIO;
+}
+#endif
+
 static inline
 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+	int prio = rt_se_prio(rt_se);
 #ifdef CONFIG_SMP
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
 #endif
 
-		rt_rq->highest_prio = rt_se_prio(rt_se);
+	WARN_ON(!rt_prio(prio));
+	rt_rq->rt_nr_running++;
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+	if (prio < rt_rq->highest_prio.curr) {
+
+		/*
+		 * If the new task is higher in priority than anything on the
+		 * run-queue, we have a new high that must be published to
+		 * the world.  We also know that the previous high becomes
+		 * our next-highest.
+		 */
+		rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
+		rt_rq->highest_prio.curr = prio;
 #ifdef CONFIG_SMP
 		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_se_prio(rt_se));
+			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 #endif
-	}
+	} else if (prio == rt_rq->highest_prio.curr)
+		/*
+		 * If the next task is equal in priority to the highest on
+		 * the run-queue, then we implicitly know that the next highest
+		 * task cannot be any lower than current
+		 */
+		rt_rq->highest_prio.next = prio;
+	else if (prio < rt_rq->highest_prio.next)
+		/*
+		 * Otherwise, we need to recompute next-highest
+		 */
+		rt_rq->highest_prio.next = next_prio(rq);
 #endif
 #ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
+	if (rt_se->nr_cpus_allowed > 1)
 		rq->rt.rt_nr_migratory++;
-	}
 
-	update_rt_migration(rq_of_rt_rq(rt_rq));
+	update_rt_migration(rq);
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (rt_se_boosted(rt_se))
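On the enqueue side, the curr/next bookkeeping above collapses to a small state machine: a new best demotes the old best to next, a tie with the best becomes next, and anything between best and next becomes next. A standalone sketch, assuming a lower numeric value means higher priority and MAX_RT_PRIO means "empty":

#include <assert.h>

#define MAX_RT_PRIO 100		/* sentinel: nothing queued */

struct prio_cache {
	int curr;		/* highest queued prio */
	int next;		/* next-highest queued prio */
};

/* enqueue-side update, mirroring the inc_rt_tasks() cases above */
static void cache_enqueue(struct prio_cache *c, int prio)
{
	if (prio < c->curr) {
		/* new highest: the old highest becomes our next-highest */
		c->next = c->curr;
		c->curr = prio;
	} else if (prio <= c->next) {
		/*
		 * Ties the current best, or beats the cached next-highest:
		 * either way it is the new next-highest.  (The kernel calls
		 * next_prio() for the second case; for a pure enqueue the
		 * result is the same.)
		 */
		c->next = prio;
	}
}

int main(void)
{
	struct prio_cache c = { MAX_RT_PRIO, MAX_RT_PRIO };

	cache_enqueue(&c, 50);	/* curr=50 next=100 */
	cache_enqueue(&c, 40);	/* curr=40 next=50  */
	cache_enqueue(&c, 40);	/* curr=40 next=40  */
	assert(c.curr == 40 && c.next == 40);
	return 0;
}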
@@ -590,7 +639,8 @@ static inline
 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 #ifdef CONFIG_SMP
-	int highest_prio = rt_rq->highest_prio;
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+	int highest_prio = rt_rq->highest_prio.curr;
 #endif
 
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
@@ -598,33 +648,34 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_rq->rt_nr_running) {
-		struct rt_prio_array *array;
+		int prio = rt_se_prio(rt_se);
+
+		WARN_ON(prio < rt_rq->highest_prio.curr);
 
-		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-			/* recalculate */
-			array = &rt_rq->active;
-			rt_rq->highest_prio =
+		/*
+		 * This may have been our highest or next-highest priority
+		 * task and therefore we may have some recomputation to do
+		 */
+		if (prio == rt_rq->highest_prio.curr) {
+			struct rt_prio_array *array = &rt_rq->active;
+
+			rt_rq->highest_prio.curr =
 				sched_find_first_bit(array->bitmap);
-		} /* otherwise leave rq->highest prio alone */
+		}
+
+		if (prio <= rt_rq->highest_prio.next)
+			rt_rq->highest_prio.next = next_prio(rq);
 	} else
-		rt_rq->highest_prio = MAX_RT_PRIO;
+		rt_rq->highest_prio.curr = MAX_RT_PRIO;
 #endif
 #ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+	if (rt_se->nr_cpus_allowed > 1)
 		rq->rt.rt_nr_migratory--;
-	}
 
-	if (rt_rq->highest_prio != highest_prio) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_rq->highest_prio);
-	}
+	if (rq->online && rt_rq->highest_prio.curr != highest_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 
-	update_rt_migration(rq_of_rt_rq(rt_rq));
+	update_rt_migration(rq);
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (rt_se_boosted(rt_se))
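The dequeue side cannot patch the cache locally: once the departing task was the cached best, or tied with next, the new values must be recomputed from the tasks that remain, which is what sched_find_first_bit() and next_prio() do above. A companion sketch to the enqueue one after inc_rt_tasks(), scanning a hypothetical flat array of queued priorities:

#define MAX_RT_PRIO 100		/* sentinel: nothing queued */

struct prio_cache { int curr, next; };

/* dequeue-side update: 'queued' holds the prios still on the queue */
static void cache_dequeue(struct prio_cache *c, int gone_prio,
			  const int *queued, int nr_queued)
{
	int best = MAX_RT_PRIO, second = MAX_RT_PRIO;
	int i;

	if (!nr_queued) {
		c->curr = c->next = MAX_RT_PRIO;
		return;
	}

	if (gone_prio > c->next)
		return;		/* neither cached value is affected */

	/* one rescan recovers both values (the kernel splits this work) */
	for (i = 0; i < nr_queued; i++) {
		if (queued[i] < best) {
			second = best;
			best = queued[i];
		} else if (queued[i] < second) {
			second = queued[i];
		}
	}

	c->curr = best;
	c->next = second;
}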
@@ -718,6 +769,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
 	enqueue_rt_entity(rt_se);
 
+	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
+
 	inc_cpu_load(rq, p->se.load.weight);
 }
 
@@ -728,6 +782,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);
 
+	dequeue_pushable_task(rq, p);
+
 	dec_cpu_load(rq, p->se.load.weight);
 }
 
@@ -878,7 +934,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	return next;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
 	struct task_struct *p;
@@ -900,6 +956,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 
 	p = rt_task_of(rt_se);
 	p->se.exec_start = rq->clock;
+
+	return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+	struct task_struct *p = _pick_next_task_rt(rq);
+
+	/* The running task is never eligible for pushing */
+	if (p)
+		dequeue_pushable_task(rq, p);
+
 	return p;
 }
 
@@ -907,6 +975,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 	p->se.exec_start = 0;
+
+	/*
+	 * The previous task needs to be made eligible for pushing
+	 * if it is still active
+	 */
+	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
 }
 
 #ifdef CONFIG_SMP
@@ -1072,7 +1147,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		}
 
 		/* If this rq is still suitable use it. */
-		if (lowest_rq->rt.highest_prio > task->prio)
+		if (lowest_rq->rt.highest_prio.curr > task->prio)
 			break;
 
 		/* try again */
@@ -1083,6 +1158,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+	struct task_struct *p;
+
+	if (!has_pushable_tasks(rq))
+		return NULL;
+
+	p = plist_first_entry(&rq->rt.pushable_tasks,
+			      struct task_struct, pushable_tasks);
+
+	BUG_ON(rq->cpu != task_cpu(p));
+	BUG_ON(task_current(rq, p));
+	BUG_ON(p->rt.nr_cpus_allowed <= 1);
+
+	BUG_ON(!p->se.on_rq);
+	BUG_ON(!rt_task(p));
+
+	return p;
+}
+
 /*
  * If the current CPU has more than one RT task, see if the non
  * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1192,11 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
-	int ret = 0;
-	int paranoid = RT_MAX_TRIES;
 
 	if (!rq->rt.overloaded)
 		return 0;
 
-	next_task = pick_next_highest_task_rt(rq, -1);
+	next_task = pick_next_pushable_task(rq);
 	if (!next_task)
 		return 0;
 
@@ -1127,16 +1225,34 @@ static int push_rt_task(struct rq *rq)
 		struct task_struct *task;
 		/*
 		 * find lock_lowest_rq releases rq->lock
-		 * so it is possible that next_task has changed.
-		 * If it has, then try again.
+		 * so it is possible that next_task has migrated.
+		 *
+		 * We need to make sure that the task is still on the same
+		 * run-queue and is also still the next task eligible for
+		 * pushing.
 		 */
-		task = pick_next_highest_task_rt(rq, -1);
-		if (unlikely(task != next_task) && task && paranoid--) {
-			put_task_struct(next_task);
-			next_task = task;
-			goto retry;
+		task = pick_next_pushable_task(rq);
+		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+			/*
+			 * If we get here, the task hasn't moved at all, but
+			 * it has failed to push.  We will not try again,
+			 * since the other cpus will pull from us when they
+			 * are ready.
+			 */
+			dequeue_pushable_task(rq, next_task);
+			goto out;
 		}
-		goto out;
+
+		if (!task)
+			/* No more tasks, just exit */
+			goto out;
+
+		/*
+		 * Something has shifted, try again.
+		 */
+		put_task_struct(next_task);
+		next_task = task;
+		goto retry;
 	}
 
 	deactivate_task(rq, next_task, 0);
@@ -1147,23 +1263,12 @@ static int push_rt_task(struct rq *rq)
 
 	double_unlock_balance(rq, lowest_rq);
 
-	ret = 1;
 out:
 	put_task_struct(next_task);
 
-	return ret;
+	return 1;
 }
 
-/*
- * TODO: Currently we just use the second highest prio task on
- * the queue, and stop when it can't migrate (or there's
- * no more RT tasks).  There may be a case where a lower
- * priority RT task has a different affinity than the
- * higher RT task. In this case the lower RT task could
- * possibly be able to migrate where as the higher priority
- * RT task could not.  We currently ignore this issue.
- * Enhancements are welcome!
- */
 static void push_rt_tasks(struct rq *rq)
 {
 	/* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1279,35 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p, *next;
+	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
-	next = pick_next_task_rt(this_rq);
-
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
 		src_rq = cpu_rq(cpu);
+
+		/*
+		 * Don't bother taking the src_rq->lock if the next highest
+		 * task is known to be lower-priority than our current task.
+		 * This may look racy, but if this value is about to go
+		 * logically higher, the src_rq will push this task away.
+		 * And if it's going logically lower, we do not care
+		 */
+		if (src_rq->rt.highest_prio.next >=
+		    this_rq->rt.highest_prio.curr)
+			continue;
+
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
-		 * steal our next task - hence we must cause
-		 * the caller to recalculate the next task
-		 * in that case:
+		 * alter this_rq
 		 */
-		if (double_lock_balance(this_rq, src_rq)) {
-			struct task_struct *old_next = next;
-
-			next = pick_next_task_rt(this_rq);
-			if (next != old_next)
-				ret = 1;
-		}
+		double_lock_balance(this_rq, src_rq);
 
 		/*
 		 * Are there still pullable RT tasks?
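The unlocked read of src_rq->rt.highest_prio.next above is what makes the next-highest tracking pay off: most candidate runqueues can now be rejected without touching their locks, and a stale value is tolerated because a wrongly skipped queue will push its task to us itself. A sketch of the pattern with illustrative types:

struct prio_view { int curr, next; };
struct rq_view   { struct prio_view rt; /* + lock, tasks, ... */ };

/*
 * Cheap, intentionally racy filter: no lock is held on 'src' here.
 * '>=' means the best task we could steal would not beat what we
 * already have, so taking src's lock could not possibly help.
 */
static int pull_could_help(const struct rq_view *this_rq,
			   const struct rq_view *src)
{
	if (src->rt.next >= this_rq->rt.curr)
		return 0;

	return 1;	/* worth paying for double_lock_balance() */
}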
@@ -1214,7 +1321,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 * Do we have an RT task that preempts
 		 * the to-be-scheduled task?
 		 */
-		if (p && (!next || (p->prio < next->prio))) {
+		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!p->se.on_rq);
 
@@ -1224,12 +1331,9 @@ static int pull_rt_task(struct rq *this_rq)
 			 * This is just that p is waking up and hasn't
 			 * had a chance to schedule.  We only pull
 			 * p if it is lower in priority than the
-			 * current task on the run queue or
-			 * this_rq next task is lower in prio than
-			 * the current task on that rq.
+			 * current task on the run queue
 			 */
-			if (p->prio < src_rq->curr->prio ||
-			    (next && next->prio < src_rq->curr->prio))
+			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
 			ret = 1;
@@ -1242,13 +1346,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelihood
 			 * but possible)
-			 *
-			 * Update next so that we won't pick a task
-			 * on another cpu with a priority lower (or equal)
-			 * than the one we just picked.
 			 */
-			next = p;
-
 		}
  skip:
 		double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1358,27 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
 
+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+	return has_pushable_tasks(rq);
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
 	/*
-	 * If we have more than one rt_task queued, then
-	 * see if we can push the other rt_tasks off to other CPUS.
-	 * Note we may release the rq lock, and since
-	 * the lock was owned by prev, we need to release it
-	 * first via finish_lock_switch and then reaquire it here.
+	 * This is only called if needs_post_schedule_rt() indicates that
+	 * we need to push tasks away
 	 */
-	if (unlikely(rq->rt.overloaded)) {
-		spin_lock_irq(&rq->lock);
-		push_rt_tasks(rq);
-		spin_unlock_irq(&rq->lock);
-	}
+	spin_lock_irq(&rq->lock);
+	push_rt_tasks(rq);
+	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1288,7 +1389,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    rq->rt.overloaded)
+	    has_pushable_tasks(rq) &&
+	    p->rt.nr_cpus_allowed > 1)
 		push_rt_tasks(rq);
 }
 
@@ -1324,6 +1426,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 
+		if (!task_current(rq, p)) {
+			/*
+			 * Make sure we dequeue this task from the pushable list
+			 * before going further.  It will either remain off of
+			 * the list because we are no longer pushable, or it
+			 * will be requeued.
+			 */
+			if (p->rt.nr_cpus_allowed > 1)
+				dequeue_pushable_task(rq, p);
+
+			/*
+			 * Requeue if our weight is changing and still > 1
+			 */
+			if (weight > 1)
+				enqueue_pushable_task(rq, p);
+
+		}
+
 		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
 			rq->rt.rt_nr_migratory++;
 		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1346,7 +1466,7 @@ static void rq_online_rt(struct rq *rq)
 
 	__enable_runtime(rq);
 
-	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }
 
 /* Assumes rq->lock is held */
@@ -1438,7 +1558,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 		 * can release the rq lock and p could migrate.
 		 * Only reschedule if p is still on the same runqueue.
 		 */
-		if (p->prio > rq->rt.highest_prio && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
@@ -1509,6 +1629,9 @@ static void set_curr_task_rt(struct rq *rq)
 	struct task_struct *p = rq->curr;
 
 	p->se.exec_start = rq->clock;
+
+	/* The running task is never eligible for pushing */
+	dequeue_pushable_task(rq, p);
 }
 
 static const struct sched_class rt_sched_class = {
@@ -1531,6 +1654,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
+	.needs_post_schedule	= needs_post_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
 	.switched_from		= switched_from_rt,