path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 150b6988de49..6af210a7de70 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -2359,7 +2360,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq, *orig_rq;
+	struct rq *rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2367,7 +2368,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = orig_rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2650,7 +2651,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu = get_cpu();
+	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
 	/*
@@ -4902,7 +4903,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	int ret;
 	cpumask_var_t mask;
 
-	if (len < cpumask_size())
+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+		return -EINVAL;
+	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4913,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
-		if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+		size_t retlen = min_t(size_t, len, cpumask_size());
+
+		if (copy_to_user(user_mask_ptr, mask, retlen))
 			ret = -EFAULT;
 		else
-			ret = cpumask_size();
+			ret = retlen;
 	}
 	free_cpumask_var(mask);
 
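
[Editorial note, not part of the patch: the sketch below exercises the new sched_getaffinity() length handling from userspace through the raw syscall; the glibc wrapper hides the byte-count return value. The 128-byte buffer (1024 CPU bits) is an arbitrary illustrative size. With the two hunks above, the kernel rejects a length that is not a whole number of unsigned longs or cannot hold nr_cpu_ids bits, and on success it returns min(len, cpumask_size()) bytes rather than always cpumask_size().]

/*
 * Userspace sketch: len=3 should fail with EINVAL on most configurations
 * (it is not a multiple of sizeof(unsigned long)); the full-size buffer
 * should succeed and report how many bytes the kernel wrote.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long mask[16];		/* 1024 bits; assumed >= nr_cpu_ids here */
	long ret;

	ret = syscall(SYS_sched_getaffinity, 0, 3, mask);
	printf("len=3: ret=%ld errno=%d (expect -1/EINVAL)\n", ret, errno);

	memset(mask, 0, sizeof(mask));
	ret = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
	if (ret < 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("len=%zu: kernel filled %ld bytes of the mask\n", sizeof(mask), ret);
	return 0;
}
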
@@ -5383,7 +5388,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 	get_task_struct(mt);
 	task_rq_unlock(rq, &flags);
-	wake_up_process(rq->migration_thread);
+	wake_up_process(mt);
 	put_task_struct(mt);
 	wait_for_completion(&req.done);
 	tlb_migrate_finish(p->mm);
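
[Editorial sketch, not part of the patch: the fix above stops re-reading rq->migration_thread after the runqueue lock is dropped and instead wakes the mt pointer that was snapshotted and referenced while the lock was held. The userspace analogue below (pthread mutex plus a manual refcount; all names are invented for illustration) shows the same snapshot-then-unlock pattern.]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
	atomic_int refcount;
	const char *name;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct worker *shared_worker;	/* protected by "lock" */

static void worker_get(struct worker *w) { atomic_fetch_add(&w->refcount, 1); }

static void worker_put(struct worker *w)
{
	if (atomic_fetch_sub(&w->refcount, 1) == 1)
		free(w);
}

static void poke_worker(void)
{
	struct worker *w;

	pthread_mutex_lock(&lock);
	w = shared_worker;		/* snapshot under the lock, like "mt" above */
	if (!w) {
		pthread_mutex_unlock(&lock);
		return;
	}
	worker_get(w);			/* analogous to get_task_struct(mt) */
	pthread_mutex_unlock(&lock);	/* analogous to task_rq_unlock() */

	/* Use only the snapshot after the unlock; never re-read shared_worker. */
	printf("waking %s\n", w->name);
	worker_put(w);			/* analogous to put_task_struct(mt) */
}

int main(void)
{
	shared_worker = calloc(1, sizeof(*shared_worker));
	shared_worker->refcount = 1;
	shared_worker->name = "migration/0";
	poke_worker();
	worker_put(shared_worker);
	return 0;
}
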