path: root/kernel/sched_rt.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-19 08:22:26 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-20 04:26:00 -0400
commit	eff6549b957d15d1ad168d90b8c1eb643b9c163f (patch)
tree	016e025635dabb070edf1f48fe1cee5308e18fd4 /kernel/sched_rt.c
parent	b79f3833d81d54fc71d98c8064dc45f33a755a8a (diff)
sched: rt: move some code around
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Daniel K." <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	119
1 file changed, 57 insertions(+), 62 deletions(-)
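
The hunks below move balance_runtime() and do_sched_rt_period_timer() down past do_balance_runtime() and the runtime enable/disable helpers, so the forward declaration of do_balance_runtime() that the old layout needed can be dropped. A minimal standalone C sketch of that reordering pattern, using hypothetical helper()/caller() names rather than the scheduler functions themselves:

#include <stdio.h>

/*
 * Old layout (cf. the removed hunk): caller() was defined before
 * helper(), so a forward declaration of helper() was required:
 *
 *	static int helper(int x);
 */

/* New layout: define the callee first... */
static int helper(int x)
{
	return 2 * x;
}

/* ...so the caller compiles without a separate declaration. */
static int caller(int x)
{
	return helper(x) + 1;
}

int main(void)
{
	printf("%d\n", caller(20));	/* prints 41 */
	return 0;
}

Compiled with gcc and run, the sketch prints 41; the point is only the definition order, mirroring how the patch places do_balance_runtime() ahead of its caller.
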
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 87b2e3bf947..61d52112289 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -229,68 +229,6 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 #endif
 
 #ifdef CONFIG_SMP
-static int do_balance_runtime(struct rt_rq *rt_rq);
-
-static int balance_runtime(struct rt_rq *rt_rq)
-{
-	int more = 0;
-
-	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
-	}
-
-	return more;
-}
-#else
-static inline int balance_runtime(struct rt_rq *rt_rq)
-{
-	return 0;
-}
-#endif
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
-{
-	int i, idle = 1;
-	cpumask_t span;
-
-	if (rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
-	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
-		int enqueue = 0;
-		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
-		spin_lock(&rq->lock);
-		if (rt_rq->rt_time) {
-			u64 runtime;
-
-			spin_lock(&rt_rq->rt_runtime_lock);
-			if (rt_rq->rt_throttled)
-				balance_runtime(rt_rq);
-			runtime = rt_rq->rt_runtime;
-			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
-			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
-				rt_rq->rt_throttled = 0;
-				enqueue = 1;
-			}
-			if (rt_rq->rt_time || rt_rq->rt_nr_running)
-				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
-
-		if (enqueue)
-			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
-	}
-
-	return idle;
-}
-
-#ifdef CONFIG_SMP
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
@@ -425,8 +363,65 @@ static void enable_runtime(struct rq *rq)
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+	int more = 0;
+
+	if (rt_rq->rt_time > rt_rq->rt_runtime) {
+		spin_unlock(&rt_rq->rt_runtime_lock);
+		more = do_balance_runtime(rt_rq);
+		spin_lock(&rt_rq->rt_runtime_lock);
+	}
+
+	return more;
+}
+#else
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+	return 0;
+}
 #endif
 
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+	int i, idle = 1;
+	cpumask_t span;
+
+	if (rt_b->rt_runtime == RUNTIME_INF)
+		return 1;
+
+	span = sched_rt_period_mask();
+	for_each_cpu_mask(i, span) {
+		int enqueue = 0;
+		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
+		spin_lock(&rq->lock);
+		if (rt_rq->rt_time) {
+			u64 runtime;
+
+			spin_lock(&rt_rq->rt_runtime_lock);
+			if (rt_rq->rt_throttled)
+				balance_runtime(rt_rq);
+			runtime = rt_rq->rt_runtime;
+			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+				rt_rq->rt_throttled = 0;
+				enqueue = 1;
+			}
+			if (rt_rq->rt_time || rt_rq->rt_nr_running)
+				idle = 0;
+			spin_unlock(&rt_rq->rt_runtime_lock);
+		}
+
+		if (enqueue)
+			sched_rt_rq_enqueue(rt_rq);
+		spin_unlock(&rq->lock);
+	}
+
+	return idle;
+}
+
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
 #ifdef CONFIG_RT_GROUP_SCHED