Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 74
 1 file changed, 39 insertions(+), 35 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c848fd4e461..74f169ac0773 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1755,27 +1755,27 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
         __releases(rq->lock)
 {
         struct mm_struct *mm = rq->prev_mm;
-        unsigned long prev_task_flags;
+        long prev_state;
 
         rq->prev_mm = NULL;
 
         /*
          * A task struct has one reference for the use as "current".
-         * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-         * calls schedule one last time. The schedule call will never return,
-         * and the scheduled task must drop that reference.
-         * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+         * schedule one last time. The schedule call will never return, and
+         * the scheduled task must drop that reference.
+         * The test for TASK_DEAD must occur while the runqueue locks are
          * still held, otherwise prev could be scheduled on another cpu, die
          * there before we look at prev->state, and then the reference would
          * be dropped twice.
          *              Manfred Spraul <manfred@colorfullife.com>
          */
-        prev_task_flags = prev->flags;
+        prev_state = prev->state;
         finish_arch_switch(prev);
         finish_lock_switch(rq, prev);
         if (mm)
                 mmdrop(mm);
-        if (unlikely(prev_task_flags & PF_DEAD)) {
+        if (unlikely(prev_state == TASK_DEAD)) {
                 /*
                  * Remove function-return probe instances associated with this
                  * task and put them back on the free list.
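Note: the consumer side above now keys off prev->state rather than a PF_* flag. For context, the producer side of this handshake is the exit path: the dying task marks itself and calls schedule() one last time, and finish_task_switch() on the next task drops the final "current" reference. A simplified sketch of that ordering (the real code lives in kernel/exit.c and is not part of this hunk):

    /*
     * Simplified sketch of the dying task's side of the handshake.
     * Assumes the exit path now sets tsk->state = TASK_DEAD instead of
     * the old PF_DEAD flag; the exact do_exit() body is outside this diff.
     */
    void exit_path_sketch(struct task_struct *tsk)
    {
            preempt_disable();
            tsk->state = TASK_DEAD;         /* replaces PF_DEAD */
            schedule();                     /* never returns; finish_task_switch()
                                             * on the next task sees TASK_DEAD and
                                             * drops the "current" reference */
            BUG();
    }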
@@ -3348,9 +3348,6 @@ need_resched_nonpreemptible:
 
         spin_lock_irq(&rq->lock);
 
-        if (unlikely(prev->flags & PF_DEAD))
-                prev->state = EXIT_DEAD;
-
         switch_count = &prev->nivcsw;
         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                 switch_count = &prev->nvcsw;
@@ -4080,6 +4077,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * NOTE: the task may be already dead
  */
 int sched_setscheduler(struct task_struct *p, int policy,
                        struct sched_param *param)
@@ -4107,28 +4106,32 @@ recheck:
             (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
             (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                 return -EINVAL;
-        if ((policy == SCHED_NORMAL || policy == SCHED_BATCH)
-                        != (param->sched_priority == 0))
+        if (is_rt_policy(policy) != (param->sched_priority != 0))
                 return -EINVAL;
 
         /*
          * Allow unprivileged RT tasks to decrease priority:
          */
         if (!capable(CAP_SYS_NICE)) {
-                /*
-                 * can't change policy, except between SCHED_NORMAL
-                 * and SCHED_BATCH:
-                 */
-                if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) &&
-                        (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) &&
-                                !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
-                        return -EPERM;
-                /* can't increase priority */
-                if ((policy != SCHED_NORMAL && policy != SCHED_BATCH) &&
-                    param->sched_priority > p->rt_priority &&
-                    param->sched_priority >
-                        p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
-                        return -EPERM;
+                if (is_rt_policy(policy)) {
+                        unsigned long rlim_rtprio;
+                        unsigned long flags;
+
+                        if (!lock_task_sighand(p, &flags))
+                                return -ESRCH;
+                        rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+                        unlock_task_sighand(p, &flags);
+
+                        /* can't set/change the rt policy */
+                        if (policy != p->policy && !rlim_rtprio)
+                                return -EPERM;
+
+                        /* can't increase priority */
+                        if (param->sched_priority > p->rt_priority &&
+                            param->sched_priority > rlim_rtprio)
+                                return -EPERM;
+                }
+
                 /* can't change other user's priorities */
                 if ((current->euid != p->euid) &&
                     (current->euid != p->uid))
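Note: the unprivileged-user checks above use an is_rt_policy() helper that is not defined in any hunk shown here; presumably it is the obvious predicate over the two real-time classes, along the lines of:

    /*
     * Assumed shape of the helper used above; its actual definition is
     * introduced outside the hunks shown in this diff.
     */
    static inline int is_rt_policy(int policy)
    {
            return policy == SCHED_FIFO || policy == SCHED_RR;
    }

Reading RLIMIT_RTPRIO under lock_task_sighand() rather than dereferencing p->signal unconditionally keeps the check safe when the target task is already exiting, which is what the "task may be already dead" note above refers to.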
@@ -4193,14 +4196,13 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
                 return -EINVAL;
         if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                 return -EFAULT;
-        read_lock_irq(&tasklist_lock);
+
+        rcu_read_lock();
+        retval = -ESRCH;
         p = find_process_by_pid(pid);
-        if (!p) {
-                read_unlock_irq(&tasklist_lock);
-                return -ESRCH;
-        }
-        retval = sched_setscheduler(p, policy, &lparam);
-        read_unlock_irq(&tasklist_lock);
+        if (p != NULL)
+                retval = sched_setscheduler(p, policy, &lparam);
+        rcu_read_unlock();
 
         return retval;
 }
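Note: with the read_lock_irq(&tasklist_lock) pair replaced by rcu_read_lock(), sched_setscheduler() has to tolerate a task that exits concurrently (hence the lock_task_sighand() check earlier). The calling convention for in-kernel users is unchanged; an illustrative (hypothetical) caller:

    /*
     * Illustrative in-kernel caller; the helper name and priority value
     * are hypothetical, not part of this patch.
     */
    static int make_fifo(struct task_struct *p)
    {
            struct sched_param sp = { .sched_priority = 1 };

            return sched_setscheduler(p, SCHED_FIFO, &sp);
    }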
@@ -5151,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
         BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
 
         /* Cannot have done final schedule yet: would have vanished. */
-        BUG_ON(p->flags & PF_DEAD);
+        BUG_ON(p->state == TASK_DEAD);
 
         get_task_struct(p);
 
@@ -5272,9 +5274,11 @@ static struct notifier_block __cpuinitdata migration_notifier = {
 int __init migration_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
+        int err;
 
         /* Start one for the boot CPU: */
-        migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+        err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+        BUG_ON(err == NOTIFY_BAD);
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
 
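Note: the new BUG_ON() relies on the standard notifier return convention: a CPU_UP_PREPARE callback reports failure with NOTIFY_BAD and success with NOTIFY_OK. A minimal illustration of that convention (hypothetical callback, not the real migration_call()):

    /*
     * Hypothetical CPU-hotplug callback illustrating the return values the
     * new BUG_ON() in migration_init() checks for; example_prepare_cpu()
     * is a made-up helper.
     */
    static int example_cpu_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
    {
            if (action == CPU_UP_PREPARE && example_prepare_cpu((long)hcpu) < 0)
                    return NOTIFY_BAD;      /* migration_init() would BUG_ON() this */
            return NOTIFY_OK;
    }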
