Diffstat (limited to 'kernel')
 kernel/cpuset.c           |  6
 kernel/fork.c             |  6
 kernel/mutex.c            |  4
 kernel/power/qos.c        | 20
 kernel/sched/core.c       | 96
 kernel/sched/cpupri.c     |  4
 kernel/sched/fair.c       | 10
 kernel/time/sched_clock.c |  2
 kernel/time/tick-sched.c  |  5
 kernel/wait.c             |  3
 10 files changed, 113 insertions(+), 43 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fedd..010a0083c0ae 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1608,11 +1608,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
 	cpuset_filetype_t type = cft->private;
-	int retval = -ENODEV;
+	int retval = 0;
 
 	mutex_lock(&cpuset_mutex);
-	if (!is_cpuset_online(cs))
+	if (!is_cpuset_online(cs)) {
+		retval = -ENODEV;
 		goto out_unlock;
+	}
 
 	switch (type) {
 	case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb8a968..e23bb19e2a3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
 		 int __user *, parent_tidptr,
 		 int __user *, child_tidptr,
 		 int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+		int, stack_size,
+		int __user *, parent_tidptr,
+		int __user *, child_tidptr,
+		int, tls_val)
 #else
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		 int __user *, parent_tidptr,
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
 				  0, &ctx->dep_map, _RET_IP_, ctx);
-	if (!ret && ctx->acquired > 0)
+	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
 				  0, &ctx->dep_map, _RET_IP_, ctx);
 
-	if (!ret && ctx->acquired > 0)
+	if (!ret && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
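
Both hunks gate deadlock injection on ctx->acquired > 1: since ctx->acquired has already been bumped for the lock just taken, a forced -EDEADLK is no longer injected when that is the only lock held, where a backoff would have nothing to release. A minimal, hypothetical sketch of the wound/wait pattern this exercises (obj_a, obj_b and my_class are assumed names, not from this patch):

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_class);	/* my_class: a DEFINE_WW_CLASS() instance */

	ret = ww_mutex_lock(&obj_a->lock, &ctx);	/* first lock: never injected now */
	if (!ret)
		ret = ww_mutex_lock(&obj_b->lock, &ctx);	/* may see an injected -EDEADLK */
	if (ret == -EDEADLK) {
		/* Back off: drop what we hold, then sleep on the contended lock. */
		ww_mutex_unlock(&obj_a->lock);
		ww_mutex_lock_slow(&obj_b->lock, &ctx);
		/* ... retake obj_a->lock, then continue ... */
	}
	ww_acquire_done(&ctx);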
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+static void __pm_qos_update_request(struct pm_qos_request *req,
+				    s32 new_value)
+{
+	trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
 /**
  * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
  * @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
 						  struct pm_qos_request,
 						  work);
 
-	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
 /**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
 	}
 
 	cancel_delayed_work_sync(&req->work);
-
-	trace_pm_qos_update_request(req->pm_qos_class, new_value);
-	if (new_value != req->node.prio)
-		pm_qos_update_target(
-			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+	__pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
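The qos.c hunks are a refactor with a deadlock fix attached: the compare-and-update logic moves into __pm_qos_update_request(), and the timeout worker calls that directly instead of pm_qos_update_request(), which would have invoked cancel_delayed_work_sync() on the very work item currently running it. For context, a hedged sketch of the public API this serves (the request variable is a hypothetical example):

	static struct pm_qos_request req;

	/* Cap CPU/DMA wakeup latency at 20 usec while the device is busy. */
	pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 20);

	/* Tighten or relax the bound as conditions change. */
	pm_qos_update_request(&req, 10);

	/* Or bound it temporarily; the request reverts to the default
	 * value when the timeout worker (pm_qos_work_fn above) fires. */
	pm_qos_update_request_timeout(&req, 10, 500 * USEC_PER_MSEC);

	pm_qos_remove_request(&req);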
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfeb..05c39f030314 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
  */
 inline int task_curr(const struct task_struct *p)
 {
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
  * or @state didn't match @p's state.
  */
 static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	unsigned long flags;
 	int cpu, success = 0;
 
-	smp_wmb();
+	/*
+	 * If we are going to wake up a thread waiting for CONDITION we
+	 * need to ensure that CONDITION=1 done by the caller can not be
+	 * reordered with p->state check below. This pairs with mb() in
+	 * set_current_state() the waiting thread does.
+	 */
+	smp_mb__before_spinlock();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	if (!(p->state & state))
 		goto out;
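
Upgrading smp_wmb() to smp_mb__before_spinlock() matters because the waker's CONDITION store must be ordered against a later load of p->state, and a write barrier only orders stores against stores. The idiom the new comment refers to, sketched with placeholder names (condition, task):

	/* Waiter: set_current_state() implies a full barrier, so the
	 * state store is ordered before the condition load. */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!condition)
		schedule();
	__set_current_state(TASK_RUNNING);

	/* Waker: without a full barrier before the p->state check in
	 * try_to_wake_up(), the condition store could be reordered past
	 * that load, and waiter and waker could miss each other. */
	condition = 1;
	wake_up_process(task);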
@@ -1577,8 +1585,9 @@ out:
  * @p: The process to be woken up.
  *
  * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.  Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
  * This makes sure that uptime, CFS vruntime, load
  * balancing, etc... continue to move forward, even
  * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
  */
 u64 scheduler_tick_max_deferment(void)
 {
@@ -2394,6 +2405,12 @@ need_resched:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
+	/*
+	 * Make sure that signal_pending_state()->signal_pending() below
+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+	 * done by the caller to avoid the race with signal_wake_up().
+	 */
+	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
 
 	switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible. The caller is accounted as waiting for IO.
  *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
 */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
 */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x: completion structure
 *
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
 *	    1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
 *	    1 if there are no waiters.
 *
 */
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
 /**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
 */
 int task_nice(const struct task_struct *p)
 {
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
 /**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
 int idle_cpu(int cpu)
 {
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
 /**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
 */
 struct task_struct *idle_task(int cpu)
 {
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
 /**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
 */
 static struct task_struct *find_process_by_pid(pid_t pid)
 {
@@ -3432,6 +3457,8 @@ recheck:
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
+ * Return: 0 on success. An error code otherwise.
+ *
 * NOTE that the task may be already dead.
 */
 int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
 */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 			       const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
 */
 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
 		struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
 */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 /**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
 */
 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
 */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
 */
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 		unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
 */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 		unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
 */
 SYSCALL_DEFINE0(sched_yield)
 {
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
- * Returns:
+ * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
 */
 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
 */
 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
 */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
 */
 struct task_struct *curr_task(int cpu)
 {
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
 */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
 */
 int cpupri_init(struct cpupri *cp)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9565645e3202..68f1609ca149 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 */
 	update_entity_load_avg(curr, 1);
 	update_cfs_rq_blocked_load(cfs_rq, 1);
+	update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
 	/*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
 */
 static inline int get_sd_load_idx(struct sched_domain *sd,
 					enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
 */
 static bool update_sd_pick_busiest(struct lb_env *env,
 				   struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 * assuming lower CPU number will be equivalent to lower a SMT thread
 * number.
 *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
 * this CPU. The amount of the imbalance is returned in *imbalance.
 *
 * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 * @balance: Pointer to a variable indicating if this_cpu
 *	is the appropriate cpu to perform load balancing at this_level.
 *
- * Returns:	- the busiest group if imbalance exists.
+ * Return:	- The busiest group if imbalance exists.
 *		- If no imbalance and user has opted for power-savings balance,
 *		   return the least loaded group whose CPUs can be
 *		   put to idle by rebalancing its tasks onto our group.
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..0b479a6a22bb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	BUG_ON(bits > 32);
 	WARN_ON(!irqs_disabled());
 	read_sched_clock = read;
-	sched_clock_mask = (1 << bits) - 1;
+	sched_clock_mask = (1ULL << bits) - 1;
 	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
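
The one-character fix addresses a classic promotion bug: for bits == 32, `1 << bits` shifts a 32-bit int by its full width, which is undefined behaviour in C (on x86 the shift count wraps, giving a mask of 0 instead of 0xffffffff). Widening the constant first keeps the arithmetic in 64 bits. A stand-alone, hypothetical illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int bits = 32;

		/* (1 << bits) - 1 would be undefined here; the widened
		 * form evaluates in 64 bits and masks correctly. */
		uint64_t mask = (1ULL << bits) - 1;

		printf("%#llx\n", (unsigned long long)mask);	/* 0xffffffff */
		return 0;
	}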
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e77edc97e036..e8a1516cc0a3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
 		 * Don't allow the user to think they can get
 		 * full NO_HZ with this machine.
 		 */
-		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+		WARN_ONCE(have_nohz_full_mask,
+			  "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
 
 void __init tick_nohz_init(void)
 {
-	int cpu;
-
 	if (!have_nohz_full_mask) {
 		if (tick_nohz_init_all() < 0)
 			return;
diff --git a/kernel/wait.c b/kernel/wait.c
index dec68bd4e9d8..d550920e040c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
 /**
 * wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
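
The kernel-doc fix above replaces parameters copied from the wait_on_bit() helpers with the single argument wake_up_atomic_t() actually takes. For context, a hedged sketch of how this pair was typically used in kernels of this era (obj and my_wait_fn are assumed names; later kernels replaced the interface with wait_var_event()):

	/* Action callback for wait_on_atomic_t(): sleep until woken. */
	static int my_wait_fn(atomic_t *p)
	{
		schedule();
		return 0;
	}

	/* Waiter: block until the counter reaches zero. */
	wait_on_atomic_t(&obj->refs, my_wait_fn, TASK_UNINTERRUPTIBLE);

	/* Release path: the final put wakes any waiter. */
	if (atomic_dec_and_test(&obj->refs))
		wake_up_atomic_t(&obj->refs);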
