 include/linux/sched.h |  6
 kernel/sched/core.c   | 82
 kernel/sched/cpupri.c |  4
 kernel/sched/fair.c   |  9
 4 files changed, 76 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 50d04b92ceda..82300247974c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  * Test if a process is not yet dead (at most zombie state)
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
  */
 static inline int pid_alive(struct task_struct *p)
 {
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
@@ -1893,6 +1897,8 @@ extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
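
These three helpers share the 1/0 convention the new Return: lines document. A
minimal usage sketch, assuming the caller holds rcu_read_lock() so the task
cannot be freed underneath it (the helper below is hypothetical, not part of
the patch):

  /* Hypothetical: report whether @p may still be scheduled to run user work. */
  static int task_worth_inspecting(struct task_struct *p)
  {
          if (is_idle_task(p))
                  return 0;       /* idle tasks never run user work */
          return pid_alive(p);    /* 1 if alive (at most zombie), 0 otherwise */
  }
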
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d8eb4525e76..4c3967f91e20 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
  */
 inline int task_curr(const struct task_struct *p)
 {
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
  * or @state didn't match @p's state.
  */
 static int
@@ -1577,8 +1579,9 @@ out:
  * @p: The process to be woken up.
  *
  * Attempt to wake up the nominated process and move it to the set of runnable
- * processes. Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2194,8 @@ void scheduler_tick(void)
  * This makes sure that uptime, CFS vruntime, load
  * balancing, etc... continue to move forward, even
  * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
  */
 u64 scheduler_tick_max_deferment(void)
 {
@@ -2796,8 +2801,8 @@ EXPORT_SYMBOL(wait_for_completion);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
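
The reworded Return: text is the convention for every *_timeout() variant
below: 0 means the wait timed out, a positive value is the number of jiffies
that were left. A hedged sketch, assuming a hypothetical struct mydev whose
interrupt handler calls complete(&dev->done):

  /* Hypothetical driver helper: wait up to 100ms for the DMA-done IRQ. */
  static int mydev_wait_dma(struct mydev *dev)
  {
          unsigned long left;

          left = wait_for_completion_timeout(&dev->done, msecs_to_jiffies(100));
          if (!left)
                  return -ETIMEDOUT;      /* 0: timed out */
          return 0;                       /* left > 0: 'left' jiffies to spare */
  }
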
@@ -2829,8 +2834,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible. The caller is accounted as waiting for IO.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2851,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -2865,8 +2870,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
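
The interruptible variants fold a third outcome into the same long, so callers
must test the sign before testing for zero. A sketch, again with a hypothetical
struct mydev and its dev->ready completion:

  static int mydev_wait_ready(struct mydev *dev)
  {
          long ret;

          ret = wait_for_completion_interruptible_timeout(&dev->ready, HZ);
          if (ret < 0)
                  return ret;             /* -ERESTARTSYS: a signal arrived */
          if (ret == 0)
                  return -ETIMEDOUT;      /* timed out */
          return 0;                       /* ret > 0: 'ret' jiffies were left */
  }
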
@@ -2883,7 +2888,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -2903,8 +2908,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2923,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  * try_wait_for_completion - try to decrement a completion without blocking
  * @x: completion structure
  *
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
  *	   1 if a decrement succeeded.
  *
  * If a completion is being used as a counting completion,
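
try_wait_for_completion() is the non-blocking probe, so its 0/1 result maps
directly onto an if. A sketch of the counting-completion use the comment
mentions; struct token_pool, consume_token() and schedule_refill() are
hypothetical, not part of the patch:

  /* Hypothetical non-sleeping consumer: safe in atomic context. */
  static void take_token(struct token_pool *pool)
  {
          if (try_wait_for_completion(&pool->token))
                  consume_token(pool);    /* 1: decrement succeeded */
          else
                  schedule_refill(pool);  /* 0: a decrement would have blocked */
  }
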
@@ -2945,7 +2950,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
  * completion_done - Test to see if a completion has any waiters
  * @x: completion structure
  *
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
  *	   1 if there are no waiters.
  *
  */
@@ -3182,7 +3187,7 @@ SYSCALL_DEFINE1(nice, int, increment)
  * task_prio - return the priority value of a given task.
  * @p: the task in question.
  *
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
@@ -3194,6 +3199,8 @@ int task_prio(const struct task_struct *p)
 /**
  * task_nice - return the nice value of a given task.
  * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
  */
 int task_nice(const struct task_struct *p)
 {
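
task_prio() and task_nice() report the same setting in different units: the
nice value stays in [-20, 19], while the /proc priority view offsets RT tasks
by -200 and centers normal tasks around 0. A hypothetical debug helper that
prints both:

  static void dump_task_prio(struct task_struct *p)
  {
          pr_info("%s[%d]: prio=%d nice=%d\n",
                  p->comm, task_pid_nr(p), task_prio(p), task_nice(p));
  }
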
@@ -3204,6 +3211,8 @@ EXPORT_SYMBOL(task_nice);
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
  */
 int idle_cpu(int cpu)
 {
@@ -3226,6 +3235,8 @@ int idle_cpu(int cpu)
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
  */
 struct task_struct *idle_task(int cpu)
 {
@@ -3235,6 +3246,8 @@ struct task_struct *idle_task(int cpu)
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
  */
 static struct task_struct *find_process_by_pid(pid_t pid)
 {
@@ -3432,6 +3445,8 @@ recheck:
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
+ * Return: 0 on success. An error code otherwise.
+ *
  * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3466,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
  * current context has permission. For example, this is needed in
  * stop_machine(): we create temporary high priority worker threads,
  * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 			       const struct sched_param *param)
@@ -3485,6 +3502,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
 		struct sched_param __user *, param)
@@ -3500,6 +3519,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  * sys_sched_setparam - set/change the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
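
From user space, the 0-or-error convention of these syscalls surfaces through
the glibc wrappers as 0 or -1 with errno set. A sketch of switching the calling
thread to SCHED_FIFO (the priority value 10 is an arbitrary example):

  #include <sched.h>
  #include <stdio.h>

  int make_fifo(void)
  {
          struct sched_param sp = { .sched_priority = 10 };

          /* pid 0 means "the calling thread" */
          if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
                  perror("sched_setscheduler");   /* e.g. EPERM, EINVAL */
                  return -1;
          }
          return 0;
  }
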
@@ -3509,6 +3530,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 /**
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
  */
 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
@@ -3535,6 +3559,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  * sys_sched_getparam - get the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3659,6 +3686,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 		unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3739,8 @@ out_unlock:
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 		unsigned long __user *, user_mask_ptr)
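
The affinity pair follows the same 0-or-error rule. A user-space sketch that
pins the calling thread to CPU 0 and reads the mask back (the CPU_* macros
require _GNU_SOURCE):

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int pin_to_cpu0(void)
  {
          cpu_set_t set;

          CPU_ZERO(&set);
          CPU_SET(0, &set);
          if (sched_setaffinity(0, sizeof(set), &set) == -1)
                  return -1;      /* error code arrives via errno */
          if (sched_getaffinity(0, sizeof(set), &set) == -1)
                  return -1;
          printf("allowed CPUs: %d\n", CPU_COUNT(&set));
          return 0;
  }
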
@@ -3744,6 +3775,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  *
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
  */
 SYSCALL_DEFINE0(sched_yield)
 {
@@ -3869,7 +3902,7 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns:
+ * Return:
  * true (>0) if we indeed boosted the target task.
  * false (0) if we failed to boost the target.
  * -ESRCH if there's no task to yield to.
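
A caller of yield_to() has to distinguish all three documented outcomes. A
hypothetical fragment in the spirit of KVM's directed yield, where struct cand
and the candidates list are assumed for illustration:

  /* Hypothetical: walk candidate tasks and stop once one is boosted. */
  static void boost_one_candidate(struct list_head *candidates)
  {
          struct cand *c;

          list_for_each_entry(c, candidates, list) {
                  int ret = yield_to(c->task, false);

                  if (ret > 0)
                          break;  /* boosted the target task */
                  /* ret == 0 (failed) or -ESRCH (nothing to yield to):
                   * try the next candidate */
          }
  }
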
@@ -3972,8 +4005,9 @@ long __sched io_schedule_timeout(long timeout)
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
@@ -3997,8 +4031,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  * sys_sched_get_priority_min - return minimum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
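
A user-space query of the valid range, where -1 with errno set is how the
negative error code surfaces through glibc:

  #include <sched.h>
  #include <stdio.h>

  void show_fifo_range(void)
  {
          int lo = sched_get_priority_min(SCHED_FIFO);
          int hi = sched_get_priority_max(SCHED_FIFO);

          if (lo == -1 || hi == -1)
                  perror("sched_get_priority_{min,max}");
          else
                  printf("SCHED_FIFO priorities: %d..%d\n", lo, hi);
  }
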
@@ -4024,6 +4059,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  *
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
  */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		struct timespec __user *, interval)
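
A matching user-space sketch; per the comment above, an all-zero timespec
means an infinite timeslice:

  #include <sched.h>
  #include <stdio.h>
  #include <time.h>

  void show_rr_interval(void)
  {
          struct timespec ts;

          if (sched_rr_get_interval(0, &ts) == 0)   /* pid 0: calling thread */
                  printf("timeslice: %ld.%09ld s\n",
                         (long)ts.tv_sec, ts.tv_nsec);
  }
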
@@ -6632,6 +6670,8 @@ void normalize_rt_tasks(void)
  * @cpu: the processor in question.
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
  */
 struct task_struct *curr_task(int cpu)
 {
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
  * any discrepancies created by racing against the uncertainty of the current
  * priority configuration.
  *
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
  *
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
  */
 int cpupri_init(struct cpupri *cp)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f77f9c527449..98d135584b4b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4280,6 +4280,8 @@ struct sg_lb_stats {
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
  * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
  */
 static inline int get_sd_load_idx(struct sched_domain *sd,
 					enum cpu_idle_type idle)
@@ -4574,6 +4576,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
  */
 static bool update_sd_pick_busiest(struct lb_env *env,
 				   struct sd_lb_stats *sds,
@@ -4691,7 +4696,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * assuming lower CPU number will be equivalent to lower a SMT thread
  * number.
  *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
  * this CPU. The amount of the imbalance is returned in *imbalance.
  *
  * @env: The load balancing environment.
@@ -4869,7 +4874,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * @balance: Pointer to a variable indicating if this_cpu
  * is the appropriate cpu to perform load balancing at this_level.
  *
- * Returns: - the busiest group if imbalance exists.
+ * Return: - The busiest group if imbalance exists.
  * - If no imbalance and user has opted for power-savings balance,
  * return the least loaded group whose CPUs can be
  * put to idle by rebalancing its tasks onto our group.