path: root/kernel/sched
author    Ingo Molnar <mingo@kernel.org>  2013-08-29 06:02:08 -0400
committer Ingo Molnar <mingo@kernel.org>  2013-08-29 06:02:08 -0400
commit    aee2bce3cfdcb9bf2c51c24496ee776e8202ed11 (patch)
tree      66ff8e345cf693cfb39383f25ad796e2f59ab6ad /kernel/sched
parent    5ec4c599a52362896c3e7c6a31ba6145dca9c6f5 (diff)
parent    c95389b4cd6a4b52af78bea706a274453e886251 (diff)
Merge branch 'linus' into perf/core
Pick up the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c    96
-rw-r--r--  kernel/sched/cpupri.c   4
-rw-r--r--  kernel/sched/fair.c    10
3 files changed, 84 insertions(+), 26 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6df0fbe53767..6f006002b211 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
  */
 inline int task_curr(const struct task_struct *p)
 {
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
  * or @state didn't match @p's state.
  */
 static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	unsigned long flags;
 	int cpu, success = 0;
 
-	smp_wmb();
+	/*
+	 * If we are going to wake up a thread waiting for CONDITION we
+	 * need to ensure that CONDITION=1 done by the caller can not be
+	 * reordered with p->state check below. This pairs with mb() in
+	 * set_current_state() the waiting thread does.
+	 */
+	smp_mb__before_spinlock();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	if (!(p->state & state))
 		goto out;
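
The barrier comment added above describes the canonical sleep/wake-up pairing. A minimal sketch of the two sides, with CONDITION standing in for any caller-defined flag (illustrative, not a specific kernel call site):

	/* Waiter: set_current_state() implies a full barrier (mb), so the
	 * TASK_INTERRUPTIBLE store is ordered before the CONDITION read. */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (CONDITION)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* Waker: smp_mb__before_spinlock() in try_to_wake_up() keeps the
	 * CONDITION store from being reordered past the p->state check. */
	CONDITION = 1;
	wake_up_process(p);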
@@ -1577,8 +1585,9 @@ out:
  * @p: The process to be woken up.
  *
  * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.  Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
  * This makes sure that uptime, CFS vruntime, load
  * balancing, etc... continue to move forward, even
  * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
  */
 u64 scheduler_tick_max_deferment(void)
 {
@@ -2394,6 +2405,12 @@ need_resched:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
+	/*
+	 * Make sure that signal_pending_state()->signal_pending() below
+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+	 * done by the caller to avoid the race with signal_wake_up().
+	 */
+	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
 
 	switch_count = &prev->nivcsw;
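
The same primitive closes a different race here: a caller stores TASK_INTERRUPTIBLE with __set_current_state(), which has no barrier of its own, and then calls schedule(). A sketch of the pattern being protected (illustrative):

	__set_current_state(TASK_INTERRUPTIBLE);  /* no implied barrier */
	schedule();	/* smp_mb__before_spinlock() above orders the state
			 * store before the signal_pending_state() check, so
			 * a concurrent signal_wake_up() cannot slip between
			 * them and leave the task asleep with a pending
			 * signal */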
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
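
A usage sketch of the return convention just documented; done_comp and the 100ms budget are hypothetical:

	DECLARE_COMPLETION_ONSTACK(done_comp);
	unsigned long left;

	left = wait_for_completion_timeout(&done_comp, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* 0: the timeout expired first */
	/* left > 0: completed, with 'left' jiffies of the budget unused */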
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible. The caller is accounted as waiting for IO.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
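
The interruptible variant adds a third outcome, so callers need a three-way check; a sketch (done_comp and timeout hypothetical):

	long ret = wait_for_completion_interruptible_timeout(&done_comp, timeout);

	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out */
	/* ret > 0: completed; ret is the number of jiffies left */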
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  * try_wait_for_completion - try to decrement a completion without blocking
  * @x: completion structure
  *
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
  *	    1 if a decrement succeeded.
  *
  * If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
  * completion_done - Test to see if a completion has any waiters
  * @x: completion structure
  *
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
  *	    1 if there are no waiters.
  *
  */
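
A non-blocking consumer sketch tying the two helpers together (done_comp hypothetical):

	if (try_wait_for_completion(&done_comp)) {
		/* 1: consumed one completion without blocking */
	} else {
		/* 0: a plain wait_for_completion() would block here */
	}

	if (completion_done(&done_comp)) {
		/* 1: no waiters are currently in wait_for_completion() */
	}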
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
  * task_prio - return the priority value of a given task.
  * @p: the task in question.
  *
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
 /**
  * task_nice - return the nice value of a given task.
  * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
  */
 int task_nice(const struct task_struct *p)
 {
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
  */
 int idle_cpu(int cpu)
 {
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
  */
 struct task_struct *idle_task(int cpu)
 {
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
  */
 static struct task_struct *find_process_by_pid(pid_t pid)
 {
@@ -3432,6 +3457,8 @@ recheck:
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
+ * Return: 0 on success. An error code otherwise.
+ *
  * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
  * current context has permission. For example, this is needed in
  * stop_machine(): we create temporary high priority worker threads,
  * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 			       const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
 		struct sched_param __user *, param)
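
From userspace this syscall is usually reached through the glibc wrapper, which maps the kernel's negative error code onto -1 with errno set. A sketch, assuming a caller privileged enough for SCHED_FIFO:

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 10 };

		/* pid 0 = the calling thread; fails with EPERM without
		 * CAP_SYS_NICE, EINVAL for a bad policy/priority combo */
		if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
			perror("sched_setscheduler");
		return 0;
	}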
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  * sys_sched_setparam - set/change the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 /**
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
  */
 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  * sys_sched_getparam - get the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 		unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 		unsigned long __user *, user_mask_ptr)
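
A userspace sketch of the affinity pair (CPU 2 is an arbitrary choice); the glibc wrappers pass sizeof(cpu_set_t) as @len:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(2, &set);	/* pin the caller to CPU 2 */
		if (sched_setaffinity(0, sizeof(set), &set) == -1)
			perror("sched_setaffinity");

		if (sched_getaffinity(0, sizeof(set), &set) == 0)
			printf("CPU 2 in mask: %d\n", CPU_ISSET(2, &set));
		return 0;
	}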
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  *
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
  */
 SYSCALL_DEFINE0(sched_yield)
 {
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns:
+ * Return:
  *	true (>0) if we indeed boosted the target task.
  *	false (0) if we failed to boost the target.
  *	-ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  * sys_sched_get_priority_min - return minimum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  *
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
  */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		struct timespec __user *, interval)
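
A sketch of reading the timeslice back from userspace (pid 0 = the calling thread); per the comment above, a zero timespec means an infinite timeslice:

	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		if (sched_rr_get_interval(0, &ts) == 0)
			printf("timeslice: %ld.%09ld s\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		else
			perror("sched_rr_get_interval");
		return 0;
	}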
@@ -6637,6 +6687,8 @@ void normalize_rt_tasks(void)
  * @cpu: the processor in question.
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
  */
 struct task_struct *curr_task(int cpu)
 {
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
  * any discrepancies created by racing against the uncertainty of the current
  * priority configuration.
  *
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
  *
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
  */
 int cpupri_init(struct cpupri *cp)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 10d729b02696..8977a249816f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 */
 	update_entity_load_avg(curr, 1);
 	update_cfs_rq_blocked_load(cfs_rq, 1);
+	update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
 	/*
@@ -4324,6 +4325,8 @@ struct sg_lb_stats {
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
  * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
  */
 static inline int get_sd_load_idx(struct sched_domain *sd,
 					enum cpu_idle_type idle)
@@ -4618,6 +4621,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
  */
 static bool update_sd_pick_busiest(struct lb_env *env,
 				   struct sd_lb_stats *sds,
@@ -4735,7 +4741,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * assuming lower CPU number will be equivalent to lower a SMT thread
  * number.
  *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
  * this CPU. The amount of the imbalance is returned in *imbalance.
  *
  * @env: The load balancing environment.
@@ -4913,7 +4919,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
- * Returns:	- the busiest group if imbalance exists.
+ * Return:	- The busiest group if imbalance exists.
  *		- If no imbalance and user has opted for power-savings balance,
  *		   return the least loaded group whose CPUs can be
  *		   put to idle by rebalancing its tasks onto our group.