 kernel/sched.c    |  3 +++
 kernel/sched_rt.c | 43 ++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 88a17c7128c3..cecaea67ae9b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -362,6 +362,8 @@ struct rt_rq {
 	u64 rt_time;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	unsigned long rt_nr_boosted;
+
 	struct rq *rq;
 	struct list_head leaf_rt_rq_list;
 	struct task_group *tg;
@@ -7112,6 +7114,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_throttled = 0;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	rt_rq->rt_nr_boosted = 0;
 	rt_rq->rq = rq;
 #endif
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 274b40d7bef2..8d4269381239 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -110,6 +110,23 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *rt_rq = group_rt_rq(rt_se);
+	struct task_struct *p;
+
+	if (rt_rq)
+		return !!rt_rq->rt_nr_boosted;
+
+	p = rt_task_of(rt_se);
+	return p->prio != p->normal_prio;
+}
+
 #else
 
 static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
@@ -149,6 +166,10 @@ static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
 {
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
 #endif
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
@@ -172,7 +193,7 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
 		return 0;
 
 	if (rt_rq->rt_throttled)
-		return 1;
+		return rt_rq_throttled(rt_rq);
 
 	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
 	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
@@ -183,8 +204,10 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
 		rq->rt_throttled = 1;
 		rt_rq->rt_throttled = 1;
 
-		sched_rt_ratio_dequeue(rt_rq);
-		return 1;
+		if (rt_rq_throttled(rt_rq)) {
+			sched_rt_ratio_dequeue(rt_rq);
+			return 1;
+		}
 	}
 
 	return 0;
@@ -265,6 +288,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+#endif
 }
 
 static inline
@@ -295,6 +322,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted--;
+
+	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -303,7 +336,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && group_rq->rt_throttled)
+	if (group_rq && rt_rq_throttled(group_rq))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -496,7 +529,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (unlikely(!rt_rq->rt_nr_running))
 		return NULL;
 
-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (rt_rq_throttled(rt_rq))
 		return NULL;
 
 	do {
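
A note for readers tracing the hunks above: the patch makes a bandwidth-throttled rt_rq count as throttled only while it holds no PI-boosted tasks (rt_se_boosted() checks p->prio != p->normal_prio), so a boosted lock holder can keep running until it releases the lock even though the group's runtime is exhausted. The following standalone userspace sketch demonstrates that invariant; the toy_ types are illustrative stand-ins, not the kernel's structures.

/*
 * Toy model of the boosted-throttle invariant introduced above:
 * a throttled rt_rq is only treated as throttled while none of
 * its tasks are PI-boosted.
 */
#include <stdio.h>

struct toy_rt_rq {
	int rt_throttled;		/* bandwidth limit hit */
	unsigned long rt_nr_boosted;	/* tasks with prio != normal_prio */
};

/* Mirrors the CONFIG_FAIR_GROUP_SCHED variant of rt_rq_throttled(). */
static int toy_rt_rq_throttled(struct toy_rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

int main(void)
{
	struct toy_rt_rq rq = { .rt_throttled = 1, .rt_nr_boosted = 0 };

	/* Throttled, no boosted tasks: pick_next_task_rt() skips the group. */
	printf("throttled, unboosted: %d\n", toy_rt_rq_throttled(&rq));

	/*
	 * A task in the group gets boosted via priority inheritance,
	 * so inc_rt_tasks() would bump rt_nr_boosted; the group is
	 * schedulable again even though its bandwidth is exhausted.
	 */
	rq.rt_nr_boosted++;
	printf("throttled, boosted:   %d\n", toy_rt_rq_throttled(&rq));

	return 0;
}

In the same spirit, the WARN_ON() added to dec_rt_tasks() asserts that rt_nr_boosted never outlives the tasks it counts.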