about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c         48
-rw-r--r--  kernel/softirq.c        2
-rw-r--r--  kernel/stop_machine.c   2
3 files changed, 38 insertions(+), 14 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b048ad8a11af..8d7c246ab864 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4746,16 +4746,8 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4746 set_load_weight(p); 4746 set_load_weight(p);
4747} 4747}
4748 4748
4749/** 4749static int __sched_setscheduler(struct task_struct *p, int policy,
4750 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 4750 struct sched_param *param, bool user)
4751 * @p: the task in question.
4752 * @policy: new policy.
4753 * @param: structure containing the new RT priority.
4754 *
4755 * NOTE that the task may be already dead.
4756 */
4757int sched_setscheduler(struct task_struct *p, int policy,
4758 struct sched_param *param)
4759{ 4751{
4760 int retval, oldprio, oldpolicy = -1, on_rq, running; 4752 int retval, oldprio, oldpolicy = -1, on_rq, running;
4761 unsigned long flags; 4753 unsigned long flags;
@@ -4787,7 +4779,7 @@ recheck:
4787 /* 4779 /*
4788 * Allow unprivileged RT tasks to decrease priority: 4780 * Allow unprivileged RT tasks to decrease priority:
4789 */ 4781 */
4790 if (!capable(CAP_SYS_NICE)) { 4782 if (user && !capable(CAP_SYS_NICE)) {
4791 if (rt_policy(policy)) { 4783 if (rt_policy(policy)) {
4792 unsigned long rlim_rtprio; 4784 unsigned long rlim_rtprio;
4793 4785
@@ -4823,7 +4815,8 @@ recheck:
4823 * Do not allow realtime tasks into groups that have no runtime 4815 * Do not allow realtime tasks into groups that have no runtime
4824 * assigned. 4816 * assigned.
4825 */ 4817 */
4826 if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) 4818 if (user
4819 && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
4827 return -EPERM; 4820 return -EPERM;
4828#endif 4821#endif
4829 4822
@@ -4872,8 +4865,39 @@ recheck:
4872 4865
4873 return 0; 4866 return 0;
4874} 4867}
4868
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Permission checks (CAP_SYS_NICE, RLIMIT_RTPRIO) are applied against
 * the calling context, since this is the userspace-facing entry point.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       struct sched_param *param)
{
	/* user == true: enforce the caller's credentials and rlimits. */
	return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
4876 4883
/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority
 * of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       struct sched_param *param)
{
	/* user == false: skip capability/rlimit checks for in-kernel callers. */
	return __sched_setscheduler(p, policy, param, false);
}
4900
4877static int 4901static int
4878do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4902do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4879{ 4903{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 36e061740047..afd9120c2fc4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -645,7 +645,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
645 645
646 p = per_cpu(ksoftirqd, hotcpu); 646 p = per_cpu(ksoftirqd, hotcpu);
647 per_cpu(ksoftirqd, hotcpu) = NULL; 647 per_cpu(ksoftirqd, hotcpu) = NULL;
648 sched_setscheduler(p, SCHED_FIFO, &param); 648 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
649 kthread_stop(p); 649 kthread_stop(p);
650 takeover_tasklets(hotcpu); 650 takeover_tasklets(hotcpu);
651 break; 651 break;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7350bbfb076..ba9b2054ecbd 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -187,7 +187,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
187 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 187 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
188 188
189 /* One high-prio thread per cpu. We'll do this one. */ 189 /* One high-prio thread per cpu. We'll do this one. */
190 sched_setscheduler(p, SCHED_FIFO, &param); 190 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
191 kthread_bind(p, cpu); 191 kthread_bind(p, cpu);
192 wake_up_process(p); 192 wake_up_process(p);
193 wait_for_completion(&smdata.done); 193 wait_for_completion(&smdata.done);