diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-08-19 06:33:04 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-19 07:10:10 -0400 |
commit | 0b148fa04852859972abbf848177b92daeef138a (patch) | |
tree | 12d4b0daa8292fe406871b5aa034d7c9b2fae13d | |
parent | 6f0d5c390e4206dcb3804a5072a048fdb7d2b428 (diff) |
sched: rt-bandwidth group disable fixes
More extensive disabling of bandwidth control: it allows sysctl_sched_rt_runtime
to disable group bandwidth control entirely.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/sched.c | 9 | ||||
-rw-r--r-- | kernel/sched_rt.c | 5 |
2 files changed, 12 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 9a1ddb84e26d..c1bee5fb8154 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -204,11 +204,13 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; |
205 | } | 205 | } |
206 | 206 | ||
207 | static inline int rt_bandwidth_enabled(void); | ||
208 | |||
207 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | 209 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) |
208 | { | 210 | { |
209 | ktime_t now; | 211 | ktime_t now; |
210 | 212 | ||
211 | if (rt_b->rt_runtime == RUNTIME_INF) | 213 | if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) |
212 | return; | 214 | return; |
213 | 215 | ||
214 | if (hrtimer_active(&rt_b->rt_period_timer)) | 216 | if (hrtimer_active(&rt_b->rt_period_timer)) |
@@ -839,6 +841,11 @@ static inline u64 global_rt_runtime(void) | |||
839 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; | 841 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
840 | } | 842 | } |
841 | 843 | ||
844 | static inline int rt_bandwidth_enabled(void) | ||
845 | { | ||
846 | return sysctl_sched_rt_runtime >= 0; | ||
847 | } | ||
848 | |||
842 | #ifndef prepare_arch_switch | 849 | #ifndef prepare_arch_switch |
843 | # define prepare_arch_switch(next) do { } while (0) | 850 | # define prepare_arch_switch(next) do { } while (0) |
844 | #endif | 851 | #endif |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 77340b04a538..94daace5ee15 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -386,7 +386,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
386 | int i, idle = 1; | 386 | int i, idle = 1; |
387 | cpumask_t span; | 387 | cpumask_t span; |
388 | 388 | ||
389 | if (rt_b->rt_runtime == RUNTIME_INF) | 389 | if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) |
390 | return 1; | 390 | return 1; |
391 | 391 | ||
392 | span = sched_rt_period_mask(); | 392 | span = sched_rt_period_mask(); |
@@ -484,6 +484,9 @@ static void update_curr_rt(struct rq *rq) | |||
484 | curr->se.exec_start = rq->clock; | 484 | curr->se.exec_start = rq->clock; |
485 | cpuacct_charge(curr, delta_exec); | 485 | cpuacct_charge(curr, delta_exec); |
486 | 486 | ||
487 | if (!rt_bandwidth_enabled()) | ||
488 | return; | ||
489 | |||
487 | for_each_sched_rt_entity(rt_se) { | 490 | for_each_sched_rt_entity(rt_se) { |
488 | rt_rq = rt_rq_of_se(rt_se); | 491 | rt_rq = rt_rq_of_se(rt_se); |
489 | 492 | ||