author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-02-27 06:00:46 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2008-03-07 10:43:00 -0500
commit | 2692a2406b9262bbb101708815be99ec2988e48b (patch)
tree | 5f956c6f28e7e83f5a1c57e2724b1739da25da30 /kernel
parent | 1868f958eb56fc41c5985c8732e564a400c5fdf5 (diff)
sched: rt-group: fixup schedulability constraints calculation
It was only possible to configure the rt-group scheduling parameters beyond the default value within a very small range.

That's because div64_64() has a different calling convention than do_div(): it returns the quotient rather than dividing its first argument in place, so the old code threw the result away.
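A minimal user-space sketch of the mismatch (the helpers below are simplified, illustrative stand-ins, not the kernel's asm/div64.h, and to_ratio_broken()/to_ratio_fixed() are hypothetical names for the before/after versions of to_ratio()):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins (assumption: simplified user-space versions).  do_div() divides
 * its first argument IN PLACE and yields the remainder; div64_64() RETURNS
 * the quotient and leaves its arguments untouched. */
#define do_div(n, base) ({ uint64_t __rem = (n) % (base); (n) /= (base); __rem; })

static uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
        return dividend / divisor;
}

/* Pre-fix to_ratio(): treats div64_64() as if it were do_div(). */
static uint64_t to_ratio_broken(uint64_t period, uint64_t runtime)
{
        runtime *= (1ULL << 16);
        div64_64(runtime, period);      /* quotient silently dropped        */
        return runtime;                 /* returns runtime << 16, not ratio */
}

/* Post-fix to_ratio(): keep the returned quotient. */
static uint64_t to_ratio_fixed(uint64_t period, uint64_t runtime)
{
        return div64_64(runtime << 16, period);
}

int main(void)
{
        uint64_t period  = 1000000ULL * 1000;   /* 1 s in ns    */
        uint64_t runtime =  950000ULL * 1000;   /* 0.95 s in ns */

        printf("broken ratio: %llu\n",
               (unsigned long long)to_ratio_broken(period, runtime));
        printf("fixed  ratio: %llu\n",
               (unsigned long long)to_ratio_fixed(period, runtime));

        /* do_div(), by contrast, really does modify its first argument: */
        uint64_t n = runtime << 16;
        uint64_t rem = do_div(n, period);
        printf("do_div: quotient=%llu remainder=%llu\n",
               (unsigned long long)n, (unsigned long long)rem);
        return 0;
}
```

With the broken version every nonzero runtime produces a ratio vastly larger than the 1<<16 scale that __rt_schedulable() checks against, which helps explain why only a very small range of settings was accepted.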
Fix a few untidies while we are here: sysctl_sched_rt_period may overflow due to that multiplication, so cast it to u64 first. Also, the RUNTIME_INF juggling makes little sense, although it is an effective NOP.
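To make the overflow concrete, a minimal sketch, assuming the period sysctl is a 32-bit unsigned value in microseconds and the un-cast multiply is carried out in 32-bit arithmetic (the stand-in variable and the NSEC_PER_USEC definition below are local to this example):

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000U

/* Stand-in for the sysctl: a 32-bit value in microseconds. */
static unsigned int sysctl_sched_rt_period = 5000000;  /* 5 s */

int main(void)
{
        /* Without the cast the multiply wraps once the period
         * exceeds ~4.29 s (2^32 ns): */
        uint64_t wrong = sysctl_sched_rt_period * NSEC_PER_USEC;

        /* Casting one operand to u64 forces a 64-bit multiply: */
        uint64_t right = (uint64_t)sysctl_sched_rt_period * NSEC_PER_USEC;

        printf("wrong: %llu ns\n", (unsigned long long)wrong);  /* 705032704  */
        printf("right: %llu ns\n", (unsigned long long)right);  /* 5000000000 */
        return 0;
}
```

With the cast the conversion to nanoseconds is done in 64 bits regardless of the native word size, so periods beyond roughly 4.29 seconds survive it intact.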
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5b13e4b0e009..b8ee864c7481 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7726,9 +7726,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	runtime *= (1ULL << 16);
-	div64_64(runtime, period);
-	return runtime;
+	return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
@@ -7757,18 +7755,16 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 	u64 rt_runtime, rt_period;
 	int err = 0;
 
-	rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+	rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
 	if (rt_runtime_us == -1)
-		rt_runtime = rt_period;
+		rt_runtime = RUNTIME_INF;
 
 	mutex_lock(&rt_constraints_mutex);
 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 		err = -EINVAL;
 		goto unlock;
 	}
-	if (rt_runtime_us == -1)
-		rt_runtime = RUNTIME_INF;
 	tg->rt_runtime = rt_runtime;
 unlock:
 	mutex_unlock(&rt_constraints_mutex);