-rw-r--r--   include/linux/sched.h |  2
-rw-r--r--   kernel/sched.c        | 48
-rw-r--r--   kernel/softirq.c      |  2
-rw-r--r--   kernel/stop_machine.c |  2
4 files changed, 40 insertions, 14 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d1af10b90c3..21349173d148 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1658,6 +1658,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+				      struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
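As a usage illustration (not part of this patch), a kernel-side caller that spawns a worker kthread could use the newly declared prototype roughly as sketched below; the helper name and thread name are hypothetical and error handling is kept minimal.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical helper: create a per-cpu worker and make it SCHED_FIFO.
 * The target is our own kernel thread, so the capability and
 * RLIMIT_RTPRIO checks that sched_setscheduler() would apply against
 * 'current' are deliberately skipped via the _nocheck variant. */
static int example_spawn_rt_worker(int (*threadfn)(void *), void *data, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *p;
	int ret;

	p = kthread_create(threadfn, data, "example_rt/%d", cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);

	ret = sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(p);
		return ret;
	}

	kthread_bind(p, cpu);
	wake_up_process(p);
	return 0;
}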
diff --git a/kernel/sched.c b/kernel/sched.c
index c74b0d23c752..2317a2178104 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4995,16 +4995,8 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	set_load_weight(p);
 }
 
-/**
- * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * NOTE that the task may be already dead.
- */
-int sched_setscheduler(struct task_struct *p, int policy,
-		       struct sched_param *param)
+static int __sched_setscheduler(struct task_struct *p, int policy,
+				struct sched_param *param, bool user)
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
@@ -5036,7 +5028,7 @@ recheck:
 	/*
 	 * Allow unprivileged RT tasks to decrease priority:
 	 */
-	if (!capable(CAP_SYS_NICE)) {
+	if (user && !capable(CAP_SYS_NICE)) {
 		if (rt_policy(policy)) {
 			unsigned long rlim_rtprio;
 
@@ -5072,7 +5064,8 @@ recheck:
 	 * Do not allow realtime tasks into groups that have no runtime
 	 * assigned.
 	 */
-	if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+	if (user
+	    && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
 		return -EPERM;
 #endif
 
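The two hunks above carry the substance of the change: the permission checks inside __sched_setscheduler() are now gated on the new 'user' argument. A simplified sketch of the resulting structure follows (not the literal kernel code; 'allowed_by_rlimit' is a stand-in for the detailed RLIMIT_RTPRIO/priority-lowering logic the real function performs):

/* Simplified sketch of the check structure after this patch. */
static int example_rt_permission_checks(struct task_struct *p, int policy,
					bool user, bool allowed_by_rlimit)
{
	/* Capability and rlimit checks only constrain userspace requests. */
	if (user && !capable(CAP_SYS_NICE) && !allowed_by_rlimit)
		return -EPERM;

	/* The RT group-bandwidth check is likewise skipped when the
	 * request comes from inside the kernel (user == false). */
	if (user && rt_policy(policy) &&
	    task_group(p)->rt_bandwidth.rt_runtime == 0)
		return -EPERM;

	return 0;
}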
@@ -5121,8 +5114,39 @@ recheck:
 
 	return 0;
 }
+
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * NOTE that the task may be already dead.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+		       struct sched_param *param)
+{
+	return __sched_setscheduler(p, policy, param, true);
+}
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
+/**
+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Just like sched_setscheduler, only don't bother checking if the
+ * current context has permission.  For example, this is needed in
+ * stop_machine(): we create temporary high priority worker threads,
+ * but our caller might not have that capability.
+ */
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+			       struct sched_param *param)
+{
+	return __sched_setscheduler(p, policy, param, false);
+}
+
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 36e061740047..afd9120c2fc4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -645,7 +645,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 
 		p = per_cpu(ksoftirqd, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = NULL;
-		sched_setscheduler(p, SCHED_FIFO, &param);
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 		kthread_stop(p);
 		takeover_tasklets(hotcpu);
 		break;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b7350bbfb076..ba9b2054ecbd 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -187,7 +187,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 		/* One high-prio thread per cpu.  We'll do this one. */
-		sched_setscheduler(p, SCHED_FIFO, &param);
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 		kthread_bind(p, cpu);
 		wake_up_process(p);
 		wait_for_completion(&smdata.done);
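Both call-site conversions (kernel/softirq.c above and kernel/stop_machine.c here) follow from the kernel-doc comment added in kernel/sched.c: the task being boosted is a freshly created kernel thread, but the checked sched_setscheduler() evaluates CAP_SYS_NICE and RLIMIT_RTPRIO against whichever task is running the hotplug or stop_machine path, and that caller might not have the capability. An illustrative sketch of the distinction (not part of the patch):

static void example_boost_worker(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	/*
	 * sched_setscheduler(p, SCHED_FIFO, &param) would check the
	 * credentials of 'current' -- the task that triggered this code
	 * path -- and can fail with -EPERM even though 'p' is a kernel
	 * thread we just created ourselves.
	 *
	 * The _nocheck variant trusts the in-kernel caller and applies
	 * the boost regardless of who triggered the operation.
	 */
	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}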
