author		Rusty Russell <rusty@rustcorp.com.au>	2008-06-22 23:55:38 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-06-23 16:57:56 -0400
commit		961ccddd59d627b89bd3dc284b6517833bbdf25d
tree		4d755085845b8beeaa0bbc3d3ee66e1ccc4156a8 /kernel
parent		481c5346d0981940ee63037eb53e4e37b0735c10
sched: add new API sched_setscheduler_nocheck: add a flag to control access checks
Hidehiro Kawai noticed that sched_setscheduler() can fail in
stop_machine: it calls sched_setscheduler() from insmod, which can
have CAP_SYS_MODULE without CAP_SYS_NICE.
Two callers could have failed this way, so they are changed to sched_setscheduler_nocheck():
kernel/softirq.c:cpu_callback()
- CPU hotplug callback
kernel/stop_machine.c:__stop_machine_run()
- Called from various places, including modprobe()
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Cc: sugita <yumiko.sugita.yf@hitachi.com>
Cc: Satoshi OSHIMA <satoshi.oshima.fk@hitachi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
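
To make the failure mode concrete, here is a minimal sketch of the kind of in-kernel caller the new API serves. All names are hypothetical and not part of this commit; note also that the patch adds no EXPORT_SYMBOL for sched_setscheduler_nocheck(), so only built-in code can call it directly:

#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Hypothetical worker: sleeps until woken, exits when asked to stop. */
static int example_worker(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/*
 * Hypothetical built-in helper, reachable from any context -- including
 * a module's init function running under insmod's credentials.
 */
int example_run_rt_worker(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *p;
	int ret;

	p = kthread_create(example_worker, NULL, "example_rt_worker");
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Plain sched_setscheduler() here would check the *caller's*
	 * capabilities: under insmod that can be CAP_SYS_MODULE without
	 * CAP_SYS_NICE, so it could fail with -EPERM.  The _nocheck
	 * variant skips the permission checks, which is correct for a
	 * purely kernel-internal thread.
	 */
	ret = sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(p);
		return ret;
	}

	wake_up_process(p);
	return 0;
}

This mirrors the __stop_machine_run() caller converted below: the thread being promoted belongs to the kernel, so the credentials of whichever task happened to trigger the path are irrelevant.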
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		| 48
-rw-r--r--	kernel/softirq.c	|  2
-rw-r--r--	kernel/stop_machine.c	|  2
3 files changed, 38 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b048ad8a11af..8d7c246ab864 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4746,16 +4746,8 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	set_load_weight(p);
 }
 
-/**
- * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * NOTE that the task may be already dead.
- */
-int sched_setscheduler(struct task_struct *p, int policy,
-		       struct sched_param *param)
+static int __sched_setscheduler(struct task_struct *p, int policy,
+				struct sched_param *param, bool user)
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
@@ -4787,7 +4779,7 @@ recheck:
 	/*
 	 * Allow unprivileged RT tasks to decrease priority:
 	 */
-	if (!capable(CAP_SYS_NICE)) {
+	if (user && !capable(CAP_SYS_NICE)) {
 		if (rt_policy(policy)) {
 			unsigned long rlim_rtprio;
 
@@ -4823,7 +4815,8 @@ recheck:
 	 * Do not allow realtime tasks into groups that have no runtime
 	 * assigned.
 	 */
-	if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+	if (user
+	    && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
 		return -EPERM;
 #endif
 
@@ -4872,8 +4865,39 @@ recheck:
 
 	return 0;
 }
+
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * NOTE that the task may be already dead.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+		       struct sched_param *param)
+{
+	return __sched_setscheduler(p, policy, param, true);
+}
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
+/**
+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Just like sched_setscheduler, only don't bother checking if the
+ * current context has permission.  For example, this is needed in
+ * stop_machine(): we create temporary high priority worker threads,
+ * but our caller might not have that capability.
+ */
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+			       struct sched_param *param)
+{
+	return __sched_setscheduler(p, policy, param, false);
+}
+
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
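
Both entry points are now thin wrappers that funnel into the shared worker, differing only in the user argument. Schematically (an illustrative summary of the change above, not code from the patch):

/*
 * sys_sched_setscheduler()            in-kernel callers, e.g.
 *   -> do_sched_setscheduler()        __stop_machine_run() below
 *             |                                  |
 *   sched_setscheduler()           sched_setscheduler_nocheck()
 *             |  user == true                    |  user == false
 *             +------> __sched_setscheduler() <--+
 *
 * The CAP_SYS_NICE, RLIMIT_RTPRIO and RT-group bandwidth checks in
 * __sched_setscheduler() run only when user is true.
 */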
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 36e061740047..afd9120c2fc4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -645,7 +645,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 
 		p = per_cpu(ksoftirqd, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = NULL;
-		sched_setscheduler(p, SCHED_FIFO, &param);
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 		kthread_stop(p);
 		takeover_tasklets(hotcpu);
 		break;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7350bbfb076..ba9b2054ecbd 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -187,7 +187,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	/* One high-prio thread per cpu.  We'll do this one. */
-	sched_setscheduler(p, SCHED_FIFO, &param);
+	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 	kthread_bind(p, cpu);
 	wake_up_process(p);
 	wait_for_completion(&smdata.done);