Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 110
 1 file changed, 94 insertions(+), 16 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f48328ac216f..0e4b15d2a08c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,7 +194,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
@@ -204,7 +204,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se);
+			enqueue_rt_entity(rt_se, false);
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -580,6 +580,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_time > runtime) {
 		rt_rq->rt_throttled = 1;
+		printk_once(KERN_WARNING "sched: RT throttling activated\n");
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
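The added printk_once() line logs a one-time warning the first time the RT runtime limit is exceeded, so the log is not flooded on every throttling period. As background, a minimal userspace sketch of the print-once idea (the warn_once macro and main() driver below are illustrative, not the kernel's printk_once()):

#include <stdio.h>

/* Print the message only on the first call, in the spirit of printk_once(). */
#define warn_once(msg)					\
	do {						\
		static int warned;			\
		if (!warned) {				\
			warned = 1;			\
			fputs(msg, stderr);		\
		}					\
	} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once("sched: RT throttling activated\n");	/* printed once */
	return 0;
}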
@@ -783,7 +784,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	int prio = rt_se_prio(rt_se);
 
-	WARN_ON(!rt_prio(prio));
+	BUG_ON(!rt_prio(prio));
 	rt_rq->rt_nr_running++;
 
 	inc_rt_prio(rt_rq, prio);
@@ -794,7 +795,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 static inline
 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+	BUG_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
 
@@ -803,7 +804,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +820,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	list_add_tail(&rt_se->run_list, queue);
+	if (head)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
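The new head flag selects where the entity lands in its priority list: list_add() puts it at the head so it runs next among tasks of that priority, while list_add_tail() keeps the usual FIFO order. A minimal userspace sketch of that head/tail distinction on a circular doubly linked list (the struct node type and main() driver are illustrative stand-ins for the kernel's list helpers):

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

/* Simplified stand-ins for the kernel's list_add()/list_add_tail(). */
static void list_add(struct node *n, struct node *head)      /* insert at head */
{
	n->prev = head;        n->next = head->next;
	head->next->prev = n;  head->next = n;
}

static void list_add_tail(struct node *n, struct node *head) /* insert at tail */
{
	n->next = head;        n->prev = head->prev;
	head->prev->next = n;  head->prev = n;
}

int main(void)
{
	struct node queue = { &queue, &queue, 0 };
	struct node a = { 0 }, b = { 0 }, c = { 0 };
	a.id = 1; b.id = 2; c.id = 3;

	list_add_tail(&a, &queue);	/* normal enqueue: FIFO order */
	list_add_tail(&b, &queue);
	list_add(&c, &queue);		/* head == true: runs before a and b */

	for (struct node *n = queue.next; n != &queue; n = n->next)
		printf("%d ", n->id);	/* prints: 3 1 2 */
	printf("\n");
	return 0;
}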
@@ -856,11 +860,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	}
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se);
+		__enqueue_rt_entity(rt_se, head);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,21 +875,74 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se);
+			__enqueue_rt_entity(rt_se, false);
 	}
 }
 
+static inline void incr_rt_nr_uninterruptible(struct task_struct *p,
+					      struct rq *rq)
+{
+	rq->rt.rt_nr_uninterruptible++;
+}
+
+static inline void decr_rt_nr_uninterruptible(struct task_struct *p,
+					      struct rq *rq)
+{
+	rq->rt.rt_nr_uninterruptible--;
+}
+
+unsigned long rt_nr_running(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_online_cpu(i)
+		sum += cpu_rq(i)->rt.rt_nr_running;
+
+	return sum;
+}
+
+unsigned long rt_nr_running_cpu(int cpu)
+{
+	return cpu_rq(cpu)->rt.rt_nr_running;
+}
+
+unsigned long rt_nr_uninterruptible(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_online_cpu(i)
+		sum += cpu_rq(i)->rt.rt_nr_uninterruptible;
+
+	/*
+	 * Since we read the counters lockless, it might be slightly
+	 * inaccurate. Do not allow it to go below zero though:
+	 */
+	if (unlikely((long)sum < 0))
+		sum = 0;
+
+	return sum;
+}
+
+unsigned long rt_nr_uninterruptible_cpu(int cpu)
+{
+	return cpu_rq(cpu)->rt.rt_nr_uninterruptible;
+}
+
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se, head);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		decr_rt_nr_uninterruptible(p, rq);
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
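The per-runqueue rt_nr_uninterruptible counter is bumped when a TASK_UNINTERRUPTIBLE task is dequeued and dropped when the task is enqueued again, which may happen on a different CPU, so an individual per-CPU counter can go negative; only the global sum is meaningful, and rt_nr_uninterruptible() therefore clamps a transiently negative lockless sum to zero. A minimal userspace sketch of that clamping (the percpu_nr_uninterruptible array and helper below are illustrative, not the kernel's data structures):

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for each CPU's rq->rt.rt_nr_uninterruptible counter. */
static long percpu_nr_uninterruptible[NR_CPUS];

static unsigned long nr_uninterruptible_sum(void)
{
	unsigned long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_nr_uninterruptible[cpu];

	/*
	 * A task may be counted down on a different CPU than it was
	 * counted up on, so a racy read can see a negative total.
	 * Clamp it, as rt_nr_uninterruptible() does above.
	 */
	if ((long)sum < 0)
		sum = 0;

	return sum;
}

int main(void)
{
	percpu_nr_uninterruptible[0] = 2;	/* two tasks went to sleep on CPU0  */
	percpu_nr_uninterruptible[1] = -3;	/* three woke up on CPU1 (racy view) */

	printf("%lu\n", nr_uninterruptible_sum());	/* 0, not a huge unsigned value */
	return 0;
}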
@@ -896,6 +953,10 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	update_curr_rt(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		incr_rt_nr_uninterruptible(p, rq);
+
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
@@ -970,6 +1031,17 @@ static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	}
 
 	/*
+	 * If the new task is an RT task, current is not an RT task
+	 * and the new one may run on the current CPU, run it here.
+	 * This avoids sending reschedule IPIs across CPUs.
+	 */
+	if (unlikely(rt_task(p)) && !rt_task(rq->curr)) {
+		int cpu = smp_processor_id();
+		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
+			return cpu;
+	}
+
+	/*
 	 * Otherwise, just let it ride on the affined RQ and the
 	 * post-schedule router will push the preempted task away
 	 */
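The new fast path in select_task_rq_rt() keeps a woken RT task on the waking CPU when the task currently running there is not RT and the affinity mask allows it, saving a cross-CPU reschedule IPI. A minimal userspace sketch of that decision (struct task and select_cpu() are simplified stand-ins, not scheduler API):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real code works on struct task_struct/struct rq. */
struct task {
	bool rt;			/* is this an RT task?            */
	unsigned long cpus_allowed;	/* affinity mask, one bit per CPU */
};

/*
 * If the woken task is RT, the task currently running on this CPU is
 * not, and affinity permits, pick the local CPU; otherwise fall back
 * to the usual affine choice.
 */
static int select_cpu(const struct task *p, const struct task *curr,
		      int this_cpu, int affine_cpu)
{
	if (p->rt && !curr->rt && (p->cpus_allowed & (1UL << this_cpu)))
		return this_cpu;

	return affine_cpu;
}

int main(void)
{
	struct task woken = { .rt = true,  .cpus_allowed = 0xf };
	struct task curr  = { .rt = false, .cpus_allowed = 0xf };

	printf("%d\n", select_cpu(&woken, &curr, 2, 0));	/* 2: stay local       */
	curr.rt = true;
	printf("%d\n", select_cpu(&woken, &curr, 2, 0));	/* 0: defer to affine RQ */
	return 0;
}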
@@ -1136,7 +1208,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 		if (next && next->prio < idx)
 			continue;
 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
-			struct task_struct *p = rt_task_of(rt_se);
+			struct task_struct *p;
+
+			if (!rt_entity_is_task(rt_se))
+				continue;
+
+			p = rt_task_of(rt_se);
 			if (pick_rt_task(rq, p, cpu)) {
 				next = p;
 				break;
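With group scheduling enabled, an entity on a runqueue list may represent a whole group rather than a task, and calling rt_task_of() on it would be invalid; the added rt_entity_is_task() check skips such entities while scanning for a pushable task. A minimal userspace sketch of that skip (the struct entity model below is illustrative, not the kernel's sched_rt_entity):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Simplified model: an entity with a non-NULL my_q represents a group,
 * not a task, and must be skipped before treating it as a task.
 */
struct entity {
	const char *name;
	void *my_q;		/* non-NULL for a group entity */
};

static bool entity_is_task(const struct entity *se)
{
	return se->my_q == NULL;
}

int main(void)
{
	int group_marker;
	struct entity queue[] = {
		{ "group A", &group_marker },	/* must be skipped */
		{ "task 1",  NULL },
		{ "task 2",  NULL },
	};

	for (size_t i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		if (!entity_is_task(&queue[i]))
			continue;		/* like the rt_entity_is_task() check */
		printf("candidate: %s\n", queue[i].name);
	}
	return 0;
}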
@@ -1358,7 +1435,7 @@ static int push_rt_task(struct rq *rq)
 
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	activate_task(lowest_rq, next_task, 0);
+	activate_task(lowest_rq, next_task, 0, false);
 
 	resched_task(lowest_rq->curr);
 
@@ -1441,7 +1518,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	activate_task(this_rq, p, 0);
+	activate_task(this_rq, p, 0, false);
 	/*
 	 * We continue with the search, just in
 	 * case there's an even higher prio task
@@ -1459,8 +1536,10 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio) {
 		pull_rt_task(rq);
+		schedstat_inc(rq, rto_schedule);
+	}
 }
 
 static void post_schedule_rt(struct rq *rq)
@@ -1528,7 +1607,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		 */
 		if (weight > 1)
 			enqueue_pushable_task(rq, p);
-
 	}
 
 	if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {