Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  18
1 files changed, 15 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index c13f1bd2df7d..f06d059edef5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3886,6 +3886,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
                         !capable(CAP_SYS_NICE))
                 goto out_unlock;
 
+        retval = security_task_setscheduler(p, 0, NULL);
+        if (retval)
+                goto out_unlock;
+
         cpus_allowed = cpuset_cpus_allowed(p);
         cpus_and(new_mask, new_mask, cpus_allowed);
         retval = set_cpus_allowed(p, new_mask);
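
The new security_task_setscheduler() call lets an LSM veto an affinity change before the cpuset mask is even consulted. For context, this is the path reached from the sched_setaffinity(2) system call; a minimal userspace caller (illustrative only, not part of the patch) looks roughly like this:

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);      /* ask to run only on CPU 0 */

        /* With the hunk above applied, an LSM can reject this request
         * (typically with -EPERM) before the kernel touches the mask. */
        if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
                fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));
                return 1;
        }
        return 0;
}
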
@@ -3954,7 +3958,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
         if (!p)
                 goto out_unlock;
 
-        retval = 0;
+        retval = security_task_getscheduler(p);
+        if (retval)
+                goto out_unlock;
+
         cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
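
The read side gets the matching hook: where the old code unconditionally set retval to 0, security_task_getscheduler() now lets an LSM deny even the query. A small userspace reader that exercises this path (again illustrative, not part of the patch):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t mask;
        int cpu;

        /* After the hunk above, this call can fail if an LSM denies it. */
        if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
                perror("sched_getaffinity");
                return 1;
        }
        for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
                if (CPU_ISSET(cpu, &mask))
                        printf("runnable on CPU %d\n", cpu);
        return 0;
}
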
@@ -4046,6 +4053,9 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+        __might_sleep(__FILE__, __LINE__);
+#endif
         /*
          * The BKS might be reacquired before we have dropped
          * PREEMPT_ACTIVE, which could trigger a second
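
Putting the __might_sleep() check into __cond_resched() (under CONFIG_DEBUG_SPINLOCK_SLEEP) means a cond_resched() issued from atomic context now complains even when no reschedule actually happens. A hypothetical sketch of the bug class this catches; the lock and function names below are made up and not from the patch:

#include <linux/spinlock.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(example_lock);

static void buggy_path(void)
{
        spin_lock(&example_lock);
        cond_resched();         /* sleeping while atomic: the new
                                 * __might_sleep() check reports this */
        spin_unlock(&example_lock);
}
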
@@ -4142,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-        struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+        struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
         atomic_inc(&rq->nr_iowait);
         schedule();
@@ -4153,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-        struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+        struct runqueue *rq = &__raw_get_cpu_var(runqueues);
         long ret;
 
         atomic_inc(&rq->nr_iowait);
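
Both io_schedule() and io_schedule_timeout() switch from the open-coded per_cpu(runqueues, raw_smp_processor_id()) to __raw_get_cpu_var(runqueues), which is shorthand for the same access. A hypothetical kernel-style illustration of the equivalence (the variable and function below are made up for this sketch):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, example_stat);

static void bump_stat(void)
{
        /* The two lines below reach the same per-CPU slot; the second
         * is simply the shorter spelling used by this patch, and
         * neither triggers the preemption debug check that
         * smp_processor_id() would. */
        per_cpu(example_stat, raw_smp_processor_id())++;
        __raw_get_cpu_var(example_stat)++;
}
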
@@ -4746,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                 break;
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_UP_CANCELED:
+                if (!cpu_rq(cpu)->migration_thread)
+                        break;
                 /* Unbind it from offline cpu so it can run. Fall thru. */
                 kthread_bind(cpu_rq(cpu)->migration_thread,
                                 any_online_cpu(cpu_online_map));
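
The new NULL check covers the case where CPU_UP_PREPARE failed before the migration thread was created, so the CPU_UP_CANCELED rollback has nothing to unbind or stop. A hypothetical notifier skeleton showing the same prepare/cancel pairing; all names below are invented for the sketch and it omits registration of the notifier:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *example_thread;      /* NULL until UP_PREPARE succeeds */

static int example_thread_fn(void *unused)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                example_thread = kthread_create(example_thread_fn, NULL,
                                                "example/%d", cpu);
                if (IS_ERR(example_thread)) {
                        example_thread = NULL;
                        return NOTIFY_BAD;
                }
                break;
        case CPU_UP_CANCELED:
                /* UP_PREPARE may have failed before the thread was
                 * created, so the rollback must tolerate NULL -- the
                 * same situation the hunk above guards against. */
                if (!example_thread)
                        break;
                kthread_stop(example_thread);
                example_thread = NULL;
                break;
        }
        return NOTIFY_OK;
}
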