diff options
author | Peter Zijlstra <peterz@infradead.org> | 2015-02-09 06:23:20 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-02-18 10:17:23 -0500 |
commit | 2636ed5f8d15ff9395731593537b4b3fdf2af24d (patch) | |
tree | 3e124314e5694fdd6a4b611a054ffe5112a92934 /kernel/sched | |
parent | 1fe89e1b6d270aa0d3452c60d38461ea589594e3 (diff) |
sched/rt: Avoid obvious configuration fail
Setting the root group's cpu.rt_runtime_us to 0 is a bad thing; it
would disallow the kernel from creating RT tasks.
One can of course still set it to 1, which will (likely) still wreck
your kernel, but at least make it clear that setting it to 0 is not
good.
Collect both sanity checks into one place while we're here.
Suggested-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20150209112715.GO24151@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/core.c | 14 |
1 files changed, 11 insertions, 3 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 03a67f09404c..a4869bd426ca 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -7675,6 +7675,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg, | |||
7675 | { | 7675 | { |
7676 | int i, err = 0; | 7676 | int i, err = 0; |
7677 | 7677 | ||
7678 | /* | ||
7679 | * Disallowing the root group RT runtime is BAD, it would disallow the | ||
7680 | * kernel creating (and or operating) RT threads. | ||
7681 | */ | ||
7682 | if (tg == &root_task_group && rt_runtime == 0) | ||
7683 | return -EINVAL; | ||
7684 | |||
7685 | /* No period doesn't make any sense. */ | ||
7686 | if (rt_period == 0) | ||
7687 | return -EINVAL; | ||
7688 | |||
7678 | mutex_lock(&rt_constraints_mutex); | 7689 | mutex_lock(&rt_constraints_mutex); |
7679 | read_lock(&tasklist_lock); | 7690 | read_lock(&tasklist_lock); |
7680 | err = __rt_schedulable(tg, rt_period, rt_runtime); | 7691 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
@@ -7731,9 +7742,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) | |||
7731 | rt_period = (u64)rt_period_us * NSEC_PER_USEC; | 7742 | rt_period = (u64)rt_period_us * NSEC_PER_USEC; |
7732 | rt_runtime = tg->rt_bandwidth.rt_runtime; | 7743 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
7733 | 7744 | ||
7734 | if (rt_period == 0) | ||
7735 | return -EINVAL; | ||
7736 | |||
7737 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); | 7745 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
7738 | } | 7746 | } |
7739 | 7747 | ||