author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-02-13 09:45:39 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-02-13 09:45:39 -0500
commit	23b0fdfc9299b137bd126e9dc22f62a59dae546d (patch)
tree	22019172c555109b69a73da76561d99d7776c4f7 /kernel/sched.c
parent	4cf5d77a6eefaa7a464bc34e8cb767356f10fd74 (diff)
sched: rt-group: deal with PI
Steven mentioned the fun case where a lock-holding task will be throttled.

Simple fix: allow groups that have boosted tasks to run anyway.

If a runnable task in a throttled group gets boosted, the dequeue/enqueue
done by rt_mutex_setprio() is enough to unthrottle the group.

This is of course not quite correct. Two possible ways forward are:
 - second prio array for boosted tasks
 - boost to a prio ceiling (this would also work for deadline scheduling)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
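The new rt_nr_boosted count is what lets a throttled group keep running: the
throttle test treats a group that contains boosted tasks as not throttled.
That check lives in kernel/sched_rt.c and is outside this file-limited view,
so what follows is only a minimal standalone sketch of the idea; the struct
and helper names here are illustrative stand-ins, not the kernel's code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-group runqueue state; the real
 * struct rt_rq carries many more fields (see the hunks below). */
struct rt_rq_sketch {
	bool rt_throttled;		/* group ran out of runtime budget */
	unsigned long rt_nr_boosted;	/* tasks boosted via rt_mutex PI */
};

/* A throttled group is still allowed to run while it contains boosted
 * tasks, so a PI-boosted lock holder cannot be stalled by throttling. */
static bool rt_rq_throttled(const struct rt_rq_sketch *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

int main(void)
{
	struct rt_rq_sketch rq = { .rt_throttled = true, .rt_nr_boosted = 1 };

	printf("may run despite throttle: %s\n",
	       rt_rq_throttled(&rq) ? "no" : "yes");	/* prints "yes" */
	return 0;
}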
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 88a17c7128c3..cecaea67ae9b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -362,6 +362,8 @@ struct rt_rq {
 	u64 rt_time;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	unsigned long rt_nr_boosted;
+
 	struct rq *rq;
 	struct list_head leaf_rt_rq_list;
 	struct task_group *tg;
@@ -7112,6 +7114,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_throttled = 0;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	rt_rq->rt_nr_boosted = 0;
 	rt_rq->rq = rq;
 #endif
 }
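The count added above also has to be kept in sync as rt entities are
(de)queued; that accounting likewise lives in kernel/sched_rt.c, outside
this file-limited diffstat. Below is a hypothetical sketch reusing the
rt_rq_sketch type from the earlier sketch, assuming a task counts as
boosted whenever PI has raised its effective prio above its normal_prio;
the names are illustrative, not the kernel's.

/* Hypothetical: a task is boosted when rt_mutex PI raised its prio. */
struct task_sketch {
	int prio;		/* effective priority (may be PI-boosted) */
	int normal_prio;	/* priority without PI boosting */
};

static int task_is_boosted(const struct task_sketch *p)
{
	return p->prio != p->normal_prio;
}

/* Adjust the group's boosted count on enqueue/dequeue, so the
 * dequeue/enqueue done by rt_mutex_setprio() (see the changelog) is
 * enough to flip the group between throttled and runnable. */
static void inc_rt_tasks(struct rt_rq_sketch *rt_rq, const struct task_sketch *p)
{
	rt_rq->rt_nr_boosted += task_is_boosted(p);
}

static void dec_rt_tasks(struct rt_rq_sketch *rt_rq, const struct task_sketch *p)
{
	rt_rq->rt_nr_boosted -= task_is_boosted(p);
}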