author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-02-13 09:45:39 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-02-13 09:45:39 -0500
commit	23b0fdfc9299b137bd126e9dc22f62a59dae546d
tree	22019172c555109b69a73da76561d99d7776c4f7 /kernel
parent	4cf5d77a6eefaa7a464bc34e8cb767356f10fd74
sched: rt-group: deal with PI
Steven mentioned the fun case where a lock-holding task will be throttled.

Simple fix: allow groups that have boosted tasks to run anyway.

If a runnable task in a throttled group gets boosted the dequeue/enqueue
done by rt_mutex_setprio() is enough to unthrottle the group.

This is of course not quite correct. Two possible ways forward are:
 - second prio array for boosted tasks
 - boost to a prio ceiling (this would also work for deadline scheduling)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
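For illustration only, and not part of the patch: a minimal standalone sketch, using simplified stand-in types, of the throttling rule this change introduces, namely that a group only counts as throttled while it holds no PI-boosted tasks.

/* Minimal sketch with hypothetical stand-in types; not the kernel code itself. */
#include <stdio.h>

struct rt_rq_sketch {
	int rt_throttled;            /* runtime quota exhausted */
	unsigned long rt_nr_boosted; /* tasks boosted via rt_mutex PI */
};

/* Mirrors the new rt_rq_throttled(): throttled only if no task is boosted. */
static int rt_rq_throttled_sketch(const struct rt_rq_sketch *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

int main(void)
{
	struct rt_rq_sketch grp = { .rt_throttled = 1, .rt_nr_boosted = 0 };

	printf("throttled, no boosted task:  %d\n", rt_rq_throttled_sketch(&grp));

	grp.rt_nr_boosted = 1; /* a lock-holding task in the group got boosted */
	printf("throttled, one boosted task: %d\n", rt_rq_throttled_sketch(&grp));

	return 0;
}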
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	3
-rw-r--r--	kernel/sched_rt.c	43
2 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 88a17c7128c3..cecaea67ae9b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -362,6 +362,8 @@ struct rt_rq {
 	u64 rt_time;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	unsigned long rt_nr_boosted;
+
 	struct rq *rq;
 	struct list_head leaf_rt_rq_list;
 	struct task_group *tg;
@@ -7112,6 +7114,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_throttled = 0;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	rt_rq->rt_nr_boosted = 0;
 	rt_rq->rq = rq;
 #endif
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 274b40d7bef2..8d4269381239 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -110,6 +110,23 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
 	dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *rt_rq = group_rt_rq(rt_se);
+	struct task_struct *p;
+
+	if (rt_rq)
+		return !!rt_rq->rt_nr_boosted;
+
+	p = rt_task_of(rt_se);
+	return p->prio != p->normal_prio;
+}
+
 #else
 
 static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
@@ -149,6 +166,10 @@ static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
 {
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
 #endif
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
@@ -172,7 +193,7 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
 		return 0;
 
 	if (rt_rq->rt_throttled)
-		return 1;
+		return rt_rq_throttled(rt_rq);
 
 	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
 	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
@@ -183,8 +204,10 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
 		rq->rt_throttled = 1;
 		rt_rq->rt_throttled = 1;
 
-		sched_rt_ratio_dequeue(rt_rq);
-		return 1;
+		if (rt_rq_throttled(rt_rq)) {
+			sched_rt_ratio_dequeue(rt_rq);
+			return 1;
+		}
 	}
 
 	return 0;
@@ -265,6 +288,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+#endif
 }
 
 static inline
@@ -295,6 +322,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted--;
+
+	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -303,7 +336,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && group_rq->rt_throttled)
+	if (group_rq && rt_rq_throttled(group_rq))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -496,7 +529,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (unlikely(!rt_rq->rt_nr_running))
 		return NULL;
 
-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (rt_rq_throttled(rt_rq))
 		return NULL;
 
 	do {