path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ab3cd7858d3..3c2a54f70ffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -322,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -331,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
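A side note on the hunk above: the rcu_read_lock()/rcu_read_unlock() pair exists only to satisfy the lockdep-RCU checker enabled by CONFIG_PROVE_RCU. A minimal sketch of the general pattern follows; the names dev, my_cfg and limit are illustrative and not taken from sched.c.

	/*
	 * Minimal sketch, not from this commit: under CONFIG_PROVE_RCU,
	 * rcu_dereference() complains unless the caller is inside an RCU
	 * read-side critical section (or holds another acknowledged lock),
	 * even when object lifetime is guaranteed by other means, as the
	 * comment above notes for task_group().
	 */
	struct my_cfg *cfg;

	rcu_read_lock();                    /* enter RCU read-side section */
	cfg = rcu_dereference(dev->cfg);    /* checked dereference */
	pr_info("limit=%d\n", cfg->limit);  /* use the pointer while locked */
	rcu_read_unlock();                  /* leave the read-side section */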
@@ -2650,7 +2661,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu = get_cpu();
+	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
 	/*
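For context on the __maybe_unused annotation: on !CONFIG_SMP builds nothing reads cpu, but the get_cpu() call must stay because it disables preemption, so the variable cannot simply be removed. A brief illustrative sketch of the idiom, where FEATURE_X, compute_index() and use_index() are made-up names:

	/*
	 * Illustrative only: __maybe_unused silences -Wunused-variable when
	 * a config option compiles out every reader of the variable, while
	 * the initializer (and its side effects) still runs.
	 */
	int idx __maybe_unused = compute_index();	/* side effect must run */

	#ifdef FEATURE_X
		use_index(idx);				/* only reader of 'idx' */
	#endif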
@@ -3779,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3789,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3815,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
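The hunks above change the semantics of the bail-out paths: where the old "goto out" fell through to "return 1" (keep spinning), an unverifiable owner CPU now returns 0 (stop spinning). A hedged sketch of how a caller consumes that value, loosely modeled on the optimistic-spin loop in the mutex lock slowpath; simplified and not verbatim kernel code:

	/*
	 * Simplified, illustrative caller: a zero return from
	 * mutex_spin_on_owner() ends the optimistic spin, so paths that
	 * cannot validate the owner's CPU now make the locker fall back to
	 * sleeping instead of spinning indefinitely.
	 */
	for (;;) {
		struct thread_info *owner = ACCESS_ONCE(lock->owner);

		if (owner && !mutex_spin_on_owner(lock, owner))
			break;			/* give up spinning, block instead */

		if (mutex_trylock(lock))
			return 0;		/* got the lock while spinning */

		cpu_relax();
	}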
@@ -4902,7 +4913,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	int ret;
 	cpumask_var_t mask;
 
-	if (len < cpumask_size())
+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+		return -EINVAL;
+	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4923,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
-		if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+		size_t retlen = min_t(size_t, len, cpumask_size());
+
+		if (copy_to_user(user_mask_ptr, mask, retlen))
 			ret = -EFAULT;
 		else
-			ret = retlen;
 	}
 	free_cpumask_var(mask);
 
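The two sched_getaffinity hunks relax the old len < cpumask_size() check: a user buffer is now accepted if it covers nr_cpu_ids bits and its length is a multiple of sizeof(unsigned long), and the raw syscall returns the number of bytes it actually copied. A hedged userspace sketch using the raw syscall (the glibc wrapper instead returns 0 on success):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		/* 128-bit buffer: smaller than cpumask_size() on a large
		 * NR_CPUS kernel, but big enough for nr_cpu_ids on typical
		 * machines, so the relaxed check now accepts it. */
		unsigned long mask[2] = { 0, 0 };
		long copied = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);

		if (copied < 0) {
			perror("sched_getaffinity");
			return 1;
		}
		printf("kernel copied %ld bytes, cpu0 allowed: %s\n",
		       copied, (mask[0] & 1UL) ? "yes" : "no");
		return 0;
	}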
@@ -5383,7 +5398,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 		get_task_struct(mt);
 		task_rq_unlock(rq, &flags);
-		wake_up_process(rq->migration_thread);
+		wake_up_process(mt);
 		put_task_struct(mt);
 		wait_for_completion(&req.done);
 		tlb_migrate_finish(p->mm);
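This last hunk wakes the migration thread through the local mt pointer that was pinned with get_task_struct() while the rq lock was still held; once the lock is dropped, rq->migration_thread itself can be cleared (for example by CPU hotplug teardown). A generic sketch of that pin-then-use pattern; the field and lock names (dev->lock, dev->worker) are illustrative and not from sched.c:

	/*
	 * Illustrative pattern only: snapshot and pin a task pointer while
	 * the lock protecting the field is held, then touch only the local
	 * copy after unlocking, because the shared field may be set to NULL
	 * concurrently.
	 */
	struct task_struct *t;

	spin_lock(&dev->lock);
	t = dev->worker;		/* field is stable only under dev->lock */
	get_task_struct(t);		/* pin: keeps *t valid after unlock */
	spin_unlock(&dev->lock);

	wake_up_process(t);		/* dev->worker may already be NULL here */
	put_task_struct(t);		/* drop the pin */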