Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 96 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 60 insertions(+), 36 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9bf0d2a7304..a4d790cddb1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,13 +3,18 @@
  * policies)
  */
 
+#ifdef CONFIG_RT_GROUP_SCHED
+
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 {
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
+#endif
 	return container_of(rt_se, struct task_struct, rt);
 }
 
-#ifdef CONFIG_RT_GROUP_SCHED
-
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return rt_rq->rq;
@@ -22,6 +27,13 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 
 #else /* CONFIG_RT_GROUP_SCHED */
 
+#define rt_entity_is_task(rt_se) (1)
+
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return container_of(rt_rq, struct rq, rt);
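
Note: with CONFIG_RT_GROUP_SCHED, a sched_rt_entity can represent either a task or a scheduling group, and only group entities own a runqueue, so (!(rt_se)->my_q) identifies task entities; the !CONFIG_RT_GROUP_SCHED stub is simply (1). rt_task_of() then recovers the enclosing task_struct via container_of(). A minimal standalone model of that pattern (the struct layouts are simplified stand-ins, not the kernel's):

/* Standalone model of the container_of() pattern used by rt_task_of(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_rt_entity { void *my_q; };          /* NULL for plain tasks */
struct task_struct { int pid; struct sched_rt_entity rt; };

static struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

int main(void)
{
	struct task_struct t = { .pid = 42, .rt = { .my_q = NULL } };
	struct sched_rt_entity *se = &t.rt;

	/* A task entity has no group runqueue, so my_q is NULL. */
	if (!se->my_q)
		printf("pid %d\n", rt_task_of(se)->pid);
	return 0;
}
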
@@ -73,7 +85,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 		if (!rt_rq->overloaded) {
 			rt_set_overload(rq_of_rt_rq(rt_rq));
 			rt_rq->overloaded = 1;
@@ -86,6 +98,12 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	if (!rt_entity_is_task(rt_se))
+		return;
+
+	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+	rt_rq->rt_nr_total++;
 	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory++;
 
@@ -94,6 +112,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	if (!rt_entity_is_task(rt_se))
+		return;
+
+	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+	rt_rq->rt_nr_total--;
 	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory--;
 
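
Note: the overload test above stops using rt_nr_running, which with group scheduling counts entities on one per-group runqueue rather than RT tasks on the CPU. inc/dec_rt_migration now bail out for group entities and redirect the accounting to the root rt_rq (&rq->rt), keeping a task-only rt_nr_total next to rt_nr_migratory. A toy model of the resulting predicate (field names follow the patch; the numbers are invented):

/* A runqueue is RT-overloaded only when it holds more than one RT task
 * and at least one of them may migrate. */
#include <stdio.h>

struct rt_rq_model {
	unsigned long rt_nr_total;      /* RT tasks on this CPU */
	unsigned long rt_nr_migratory;  /* of those, movable ones */
};

static int rt_overloaded(const struct rt_rq_model *rt_rq)
{
	return rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1;
}

int main(void)
{
	struct rt_rq_model one_task  = { .rt_nr_total = 1, .rt_nr_migratory = 1 };
	struct rt_rq_model two_tasks = { .rt_nr_total = 2, .rt_nr_migratory = 1 };
	struct rt_rq_model pinned    = { .rt_nr_total = 2, .rt_nr_migratory = 0 };

	printf("%d %d %d\n", rt_overloaded(&one_task),
	       rt_overloaded(&two_tasks), rt_overloaded(&pinned));
	/* prints: 0 1 0 */
	return 0;
}
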
@@ -112,6 +136,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 #else
 
 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -586,6 +615,8 @@ static void update_curr_rt(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
+	sched_rt_avg_update(rq, delta_exec);
+
 	if (!rt_bandwidth_enabled())
 		return;
 
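
Note: sched_rt_avg_update() (defined in sched.c in this series) accumulates RT runtime into a decaying per-rq average that the fair class later uses to scale down effective CPU power. A rough sketch of an accumulator of that shape, with an assumed 1s aging period and illustrative names, not the kernel's exact code:

/* Add the RT delta, and halve the sum once per period so old runtime
 * ages out. */
#include <stdio.h>

#define PERIOD_NS 1000000000ULL  /* assumed 1s aging period */

struct rq_model {
	unsigned long long age_stamp; /* start of current period */
	unsigned long long rt_avg;    /* decayed RT runtime, ns */
};

static void sched_avg_update(struct rq_model *rq, unsigned long long now)
{
	while (now - rq->age_stamp > PERIOD_NS) {
		rq->age_stamp += PERIOD_NS;
		rq->rt_avg /= 2;          /* age out old RT runtime */
	}
}

static void rt_avg_update(struct rq_model *rq, unsigned long long now,
			  unsigned long long rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq, now);
}

int main(void)
{
	struct rq_model rq = { 0, 0 };

	rt_avg_update(&rq, 500000000ULL, 100000000ULL);  /* +100ms RT */
	rt_avg_update(&rq, 2500000000ULL, 50000000ULL);  /* 2s later  */
	printf("rt_avg=%llu\n", rq.rt_avg);
	return 0;
}
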
@@ -858,8 +889,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -870,8 +899,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -911,10 +938,13 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
 	struct rq *rq = task_rq(p);
 
+	if (sd_flag != SD_BALANCE_WAKE)
+		return smp_processor_id();
+
 	/*
 	 * If the current task is an RT task, then
 	 * try to see if we can wake this RT task up on another
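
Note: with the widened select_task_rq() interface, only genuine wakeups (SD_BALANCE_WAKE) reach the RT-aware placement logic; fork and exec balancing now just stay on the current CPU. A runnable model of that gate (flag values and find_lowest_cpu() are invented stand-ins):

#include <stdio.h>

enum { SD_BALANCE_FORK = 1, SD_BALANCE_EXEC = 2, SD_BALANCE_WAKE = 4 };

static int this_cpu = 3;                       /* pretend smp_processor_id() */

static int find_lowest_cpu(void) { return 0; } /* stand-in placement */

static int select_cpu_rt(int sd_flag)
{
	if (sd_flag != SD_BALANCE_WAKE)
		return this_cpu;          /* fork/exec: no RT push */
	return find_lowest_cpu();         /* wakeup: prefer a low-prio CPU */
}

int main(void)
{
	printf("fork -> cpu %d\n", select_cpu_rt(SD_BALANCE_FORK));
	printf("wake -> cpu %d\n", select_cpu_rt(SD_BALANCE_WAKE));
	return 0;
}
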
@@ -972,7 +1002,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);
@@ -1048,6 +1078,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
+#ifdef CONFIG_SMP
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+#endif
+
 	return p;
 }
 
@@ -1146,13 +1184,6 @@ static int find_lowest_rq(struct task_struct *task)
 		return -1; /* No targets found */
 
 	/*
-	 * Only consider CPUs that are usable for migration.
-	 * I guess we might want to change cpupri_find() to ignore those
-	 * in the first place.
-	 */
-	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
-	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system. Now we want to elect
 	 * the best one based on our affinity and topology.
@@ -1246,11 +1277,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
 static struct task_struct *pick_next_pushable_task(struct rq *rq)
 {
 	struct task_struct *p;
@@ -1450,23 +1476,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 	pull_rt_task(rq);
 }
 
-/*
- * assumes rq->lock is held
- */
-static int needs_post_schedule_rt(struct rq *rq)
-{
-	return has_pushable_tasks(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
-	/*
-	 * This is only called if needs_post_schedule_rt() indicates that
-	 * we need to push tasks away
-	 */
-	spin_lock_irq(&rq->lock);
 	push_rt_tasks(rq);
-	spin_unlock_irq(&rq->lock);
 }
 
 /*
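
Note: together with the pick_next_task_rt() hunk above, the push decision now happens where the rq lock is already held, so the per-class needs_post_schedule() hook and the locking boilerplate in post_schedule_rt() can go: the core scheduler checks rq->post_schedule and takes the lock only when a push is pending. A sketch of that caller-side pattern (the lock and the push are trivial stand-ins, not the kernel's code):

#include <stdio.h>

struct rq_model {
	int lock_held;      /* stand-in for spinlock state */
	int post_schedule;  /* set by pick_next_task when pushable tasks exist */
	int pushable;       /* stand-in for !plist_head_empty(...) */
};

static void pick_next_task(struct rq_model *rq)
{
	rq->lock_held = 1;                /* rq lock is already held here */
	rq->post_schedule = rq->pushable; /* decide while it's cheap */
	rq->lock_held = 0;
}

static void post_schedule(struct rq_model *rq)
{
	if (!rq->post_schedule)
		return;                   /* common case: no lock retaken */
	rq->lock_held = 1;
	printf("pushing RT tasks\n");     /* stand-in for push_rt_tasks() */
	rq->lock_held = 0;
	rq->post_schedule = 0;
}

int main(void)
{
	struct rq_model rq = { .pushable = 1 };
	pick_next_task(&rq);
	post_schedule(&rq);
	return 0;
}
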
@@ -1722,6 +1734,17 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }
 
+unsigned int get_rr_interval_rt(struct task_struct *task)
+{
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks
+	 */
+	if (task->policy == SCHED_RR)
+		return DEF_TIMESLICE;
+	else
+		return 0;
+}
+
 static const struct sched_class rt_sched_class = {
 	.next = &fair_sched_class,
 	.enqueue_task = enqueue_task_rt,
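
Note: get_rr_interval_rt() backs the sched_rr_get_interval(2) path through the new sched_class hook: SCHED_RR tasks report the default timeslice in jiffies and SCHED_FIFO tasks report 0 (no timeslice). A hedged sketch of converting the returned value to a timespec, assuming HZ=1000 and the sched.c definition of DEF_TIMESLICE:

#include <stdio.h>

#define HZ 1000
#define DEF_TIMESLICE (100 * HZ / 1000)   /* 100ms in ticks, per sched.c */

struct timespec_model { long tv_sec; long tv_nsec; };

static void jiffies_to_ts(unsigned int jiffies, struct timespec_model *ts)
{
	unsigned long long ns = (unsigned long long)jiffies * (1000000000ULL / HZ);
	ts->tv_sec  = ns / 1000000000ULL;
	ts->tv_nsec = ns % 1000000000ULL;
}

int main(void)
{
	struct timespec_model ts;

	jiffies_to_ts(DEF_TIMESLICE, &ts);    /* SCHED_RR case */
	printf("RR:   %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);

	jiffies_to_ts(0, &ts);                /* SCHED_FIFO case */
	printf("FIFO: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
	return 0;
}
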
@@ -1742,7 +1765,6 @@ static const struct sched_class rt_sched_class = {
 	.rq_online = rq_online_rt,
 	.rq_offline = rq_offline_rt,
 	.pre_schedule = pre_schedule_rt,
-	.needs_post_schedule = needs_post_schedule_rt,
 	.post_schedule = post_schedule_rt,
 	.task_wake_up = task_wake_up_rt,
 	.switched_from = switched_from_rt,
@@ -1751,6 +1773,8 @@ static const struct sched_class rt_sched_class = {
 	.set_curr_task = set_curr_task_rt,
 	.task_tick = task_tick_rt,
 
+	.get_rr_interval = get_rr_interval_rt,
+
 	.prio_changed = prio_changed_rt,
 	.switched_to = switched_to_rt,
 };