Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index abb36b16b93b..6af210a7de70 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -2359,7 +2360,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq, *orig_rq;
+	struct rq *rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2367,7 +2368,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = orig_rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2650,7 +2651,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu = get_cpu();
+	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
 	/*
@@ -4353,7 +4354,7 @@ int can_nice(const struct task_struct *p, const int nice)
 	/* convert nice value [19,-20] to rlimit style value [1,40] */
 	int nice_rlim = 20 - nice;
 
-	return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
 
@@ -4530,7 +4531,7 @@ recheck:
 
 		if (!lock_task_sighand(p, &flags))
 			return -ESRCH;
-		rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+		rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
 		unlock_task_sighand(p, &flags);
 
 		/* can't set/change the rt policy */
@@ -4902,7 +4903,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	int ret;
 	cpumask_var_t mask;
 
-	if (len < cpumask_size())
+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+		return -EINVAL;
+	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4913,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
-		if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+		size_t retlen = min_t(size_t, len, cpumask_size());
+
+		if (copy_to_user(user_mask_ptr, mask, retlen))
 			ret = -EFAULT;
 		else
-			ret = cpumask_size();
+			ret = retlen;
 	}
 	free_cpumask_var(mask);
 
@@ -5383,7 +5388,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 		get_task_struct(mt);
 		task_rq_unlock(rq, &flags);
-		wake_up_process(rq->migration_thread);
+		wake_up_process(mt);
 		put_task_struct(mt);
 		wait_for_completion(&req.done);
 		tlb_migrate_finish(p->mm);
@@ -7406,11 +7411,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+					   struct sysdev_class_attribute *attr,
 					   char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
 static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
+					    struct sysdev_class_attribute *attr,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
@@ -7422,11 +7429,13 @@ static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
 
 #ifdef CONFIG_SCHED_SMT
 static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+					    struct sysdev_class_attribute *attr,
 					    char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
 static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
+					     struct sysdev_class_attribute *attr,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
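
Note on the sched_getaffinity() hunks above: the syscall no longer demands a buffer of cpumask_size() bytes (which scales with CONFIG_NR_CPUS); it now accepts any buffer that covers nr_cpu_ids bits and is a multiple of sizeof(unsigned long), and it returns the number of bytes it actually copied. The following is a minimal userspace sketch, not part of the patch, that exercises the raw syscall (the glibc wrapper hides the byte count):

/* Sketch only: exercises the raw sched_getaffinity() syscall whose
 * length validation is changed in the @@ -4902 hunk above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long mask[16];	/* room for 1024 CPU bits on a 64-bit box */
	long ret;

	memset(mask, 0, sizeof(mask));
	/* pid 0 queries the calling thread; sizeof(mask) is a multiple of
	 * sizeof(unsigned long), so it passes the new length checks. */
	ret = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
	if (ret < 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("kernel copied %ld bytes; CPU0 %s\n",
	       ret, (mask[0] & 1UL) ? "allowed" : "not allowed");
	return 0;
}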