author     Yacine Belkadi <yacine.belkadi.1@gmail.com>   2013-07-31 17:59:24 -0400
committer  Jiri Kosina <jkosina@suse.cz>                 2013-08-20 06:57:25 -0400
commit     d185af300fe43c130083851ca918ea2bb9600f0f (patch)
tree       a9785bc08f4854abfb6b8d4ffc4bc4330cacbc9e /kernel
parent     1a5d6d2b1792245bcfd93aa7225f114a27cc45d3 (diff)
workqueue: fix some scripts/kernel-doc warnings
When building the htmldocs (in verbose mode), scripts/kernel-doc reports the
following type of warnings:

Warning(kernel/workqueue.c:653): No description found for return value of 'get_work_pool'

Fix them by:
- Using "Return:" sections to introduce descriptions of return values
- Adding some missing descriptions

Signed-off-by: Yacine Belkadi <yacine.belkadi.1@gmail.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
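For reference, the kernel-doc layout the patch converts these comments to looks
like the minimal sketch below. The function and its parameters are hypothetical
and not part of workqueue.c; the sketch only shows where the "Return:" section
sits so scripts/kernel-doc can pick up the return-value description.

/**
 * example_add - add two integers (hypothetical helper, illustration only)
 * @a: first addend
 * @b: second addend
 *
 * Not part of this patch; it only demonstrates the comment layout that
 * scripts/kernel-doc parses, with a "Return:" section replacing free-form
 * "Returns ..." or "RETURNS:" text.
 *
 * Return: The sum of @a and @b.
 */
static inline int example_add(int a, int b)
{
	return a + b;
}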
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  107
1 file changed, 66 insertions(+), 41 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0b72e816b8d0..7f01a3eeaf95 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -540,6 +540,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * This must be called either with pwq_lock held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
+ *
+ * Return: The unbound pool_workqueue for @node.
  */
 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                   int node)
@@ -638,8 +640,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * get_work_pool - return the worker_pool a given work was associated with
  * @work: the work item of interest
  *
- * Return the worker_pool @work was last associated with. %NULL if none.
- *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
  * access under sched-RCU read lock. As such, this function should be
  * called under wq_pool_mutex or with preemption disabled.
@@ -648,6 +648,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * mentioned locking is in effect. If the returned pool needs to be used
  * beyond the critical section, the caller is responsible for ensuring the
  * returned pool is and stays online.
+ *
+ * Return: The worker_pool @work was last associated with. %NULL if none.
  */
 static struct worker_pool *get_work_pool(struct work_struct *work)
 {
@@ -671,7 +673,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
  * get_work_pool_id - return the worker pool ID a given work is associated with
  * @work: the work item of interest
  *
- * Return the worker_pool ID @work was last associated with.
+ * Return: The worker_pool ID @work was last associated with.
  * %WORK_OFFQ_POOL_NONE if none.
  */
 static int get_work_pool_id(struct work_struct *work)
@@ -830,7 +832,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  *
- * RETURNS:
+ * Return:
  * Worker task on @cpu to wake up, %NULL if none.
  */
 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@ -965,8 +967,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
  * otherwise.
  */
 static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@ -1154,14 +1156,16 @@ out_put:
  * @flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
- * stable state - idle, on timer or on worklist. Return values are
+ * stable state - idle, on timer or on worklist.
  *
+ * Return:
  *  1 if @work was pending and we successfully stole PENDING
  *  0 if @work was idle and we claimed PENDING
  *  -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
  *  -ENOENT if someone else is canceling @work, this state may persist
  *          for arbitrarily long
  *
+ * Note:
  * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
  * interrupted while holding PENDING and @work off queue, irq must be
  * disabled on entry. This, combined with delayed_work->timer being
@@ -1403,10 +1407,10 @@ retry:
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
  * We queue the work to a specific CPU, the caller must ensure it
  * can't go away.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
  */
 bool queue_work_on(int cpu, struct workqueue_struct *wq,
                    struct work_struct *work)
@@ -1476,7 +1480,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns %false if @work was already on a queue, %true otherwise. If
+ * Return: %false if @work was already on a queue, %true otherwise. If
  * @delay is zero and @dwork is idle, it will be scheduled for immediate
  * execution.
  */
@@ -1512,7 +1516,7 @@ EXPORT_SYMBOL(queue_delayed_work_on);
  * zero, @work is guaranteed to be scheduled immediately regardless of its
  * current state.
  *
- * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * Return: %false if @dwork was idle and queued, %true if @dwork was
  * pending and its timer was modified.
  *
  * This function is safe to call from any context including IRQ handler.
@@ -1627,7 +1631,7 @@ static void worker_leave_idle(struct worker *worker)
  * Might sleep. Called without any lock but returns with pool->lock
  * held.
  *
- * RETURNS:
+ * Return:
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
@@ -1688,7 +1692,7 @@ static struct worker *alloc_worker(void)
  * CONTEXT:
  * Might sleep. Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * Pointer to the newly created worker.
  */
 static struct worker *create_worker(struct worker_pool *pool)
@@ -1788,6 +1792,8 @@ static void start_worker(struct worker *worker)
  * @pool: the target pool
  *
  * Grab the managership of @pool and create and start a new worker for it.
+ *
+ * Return: 0 on success. A negative error code otherwise.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
@@ -1932,7 +1938,7 @@ static void pool_mayday_timeout(unsigned long __pool)
  * multiple times. Does GFP_KERNEL allocations. Called only from
  * manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -1989,7 +1995,7 @@ restart:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Called only from manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -2032,7 +2038,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Does GFP_KERNEL allocations.
  */
@@ -2246,6 +2252,8 @@ static void process_scheduled_works(struct worker *worker)
  * work items regardless of their specific target workqueue. The only
  * exception is work items which belong to workqueues with a rescuer which
  * will be explained in rescuer_thread().
+ *
+ * Return: 0
  */
 static int worker_thread(void *__worker)
 {
@@ -2344,6 +2352,8 @@ sleep:
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
+ *
+ * Return: 0
  */
 static int rescuer_thread(void *__rescuer)
 {
@@ -2516,7 +2526,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
  * CONTEXT:
  * mutex_lock(wq->mutex).
  *
- * RETURNS:
+ * Return:
  * %true if @flush_color >= 0 and there's something to flush. %false
  * otherwise.
  */
@@ -2824,7 +2834,7 @@ already_gone:
  * Wait until @work has finished execution. @work is guaranteed to be idle
  * on return if it hasn't been requeued since flush started.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2884,7 +2894,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
  * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
  *
- * RETURNS:
+ * Return:
  * %true if @work was pending, %false otherwise.
  */
 bool cancel_work_sync(struct work_struct *work)
@@ -2901,7 +2911,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  * immediate execution. Like flush_work(), this function only
  * considers the last queueing instance of @dwork.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2919,11 +2929,15 @@ EXPORT_SYMBOL(flush_delayed_work);
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
  *
- * Kill off a pending delayed_work. Returns %true if @dwork was pending
- * and canceled; %false if wasn't pending. Note that the work callback
- * function may still be running on return, unless it returns %true and the
- * work doesn't re-arm itself. Explicitly flush or use
- * cancel_delayed_work_sync() to wait on it.
+ * Kill off a pending delayed_work.
+ *
+ * Return: %true if @dwork was pending and canceled; %false if it wasn't
+ * pending.
+ *
+ * Note:
+ * The work callback function may still be running on return, unless
+ * it returns %true and the work doesn't re-arm itself. Explicitly flush or
+ * use cancel_delayed_work_sync() to wait on it.
  *
  * This function is safe to call from any context including IRQ handler.
  */
@@ -2952,7 +2966,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  *
  * This is cancel_work_sync() for delayed works.
  *
- * RETURNS:
+ * Return:
  * %true if @dwork was pending, %false otherwise.
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@ -2969,7 +2983,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -3037,7 +3051,7 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * Executes the function immediately if process context is available,
  * otherwise schedules the function for delayed execution.
  *
- * Returns: 0 - function was executed
+ * Return: 0 - function was executed
  *         1 - function was scheduled for execution
  */
 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
@@ -3294,7 +3308,7 @@ static void wq_device_release(struct device *dev)
  * apply_workqueue_attrs() may race against userland updating the
  * attributes.
  *
- * Returns 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int workqueue_sysfs_register(struct workqueue_struct *wq)
 {
@@ -3387,7 +3401,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
  * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
- * return it. Returns NULL on failure.
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
 {
@@ -3440,7 +3456,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  * @pool: worker_pool to initialize
  *
  * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs.
- * Returns 0 on success, -errno on failure. Even on failure, all fields
+ *
+ * Return: 0 on success, -errno on failure. Even on failure, all fields
  * inside @pool proper are initialized and put_unbound_pool() can be called
  * on @pool safely to release it.
  */
@@ -3547,9 +3564,12 @@ static void put_unbound_pool(struct worker_pool *pool)
  * Obtain a worker_pool which has the same attributes as @attrs, bump the
  * reference count and return it. If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
- * create a new one. On failure, returns NULL.
+ * create a new one.
  *
  * Should be called with wq_pool_mutex held.
+ *
+ * Return: On success, a worker_pool with the same attributes as @attrs.
+ * On failure, %NULL.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
@@ -3779,9 +3799,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * Calculate the cpumask a workqueue with @attrs should use on @node. If
  * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation. The result is stored in @cpumask. This function returns
- * %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * calculation. The result is stored in @cpumask.
  *
  * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
  * enabled and @node has online CPUs requested by @attrs, the returned
@@ -3790,6 +3808,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * The caller is responsible for ensuring that the cpumask of @node stays
  * stable.
+ *
+ * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
  */
 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
                                  int cpu_going_down, cpumask_t *cpumask)
@@ -3843,8 +3864,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
  * items finish. Note that a work item which repeatedly requeues itself
  * back-to-back will stay on its current pwq.
  *
- * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
- * failure.
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
                           const struct workqueue_attrs *attrs)
@@ -4312,6 +4334,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  *
  * Determine whether %current is a workqueue rescuer. Can be used from
  * work functions to determine whether it's being run off the rescuer task.
+ *
+ * Return: %true if %current is a workqueue rescuer. %false otherwise.
  */
 bool current_is_workqueue_rescuer(void)
 {
@@ -4335,7 +4359,7 @@ bool current_is_workqueue_rescuer(void)
  * workqueue being congested on one CPU doesn't mean the workqueue is also
  * contested on other CPUs / NUMA nodes.
  *
- * RETURNS:
+ * Return:
  * %true if congested, %false otherwise.
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4368,7 +4392,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
- * RETURNS:
+ * Return:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
@@ -4746,9 +4770,10 @@ static void work_for_cpu_fn(struct work_struct *work)
  * @fn: the function to run
  * @arg: the function arg
  *
- * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
  */
 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -4813,7 +4838,7 @@ void freeze_workqueues_begin(void)
  * CONTEXT:
  * Grabs and releases wq_pool_mutex.
  *
- * RETURNS:
+ * Return:
  * %true if some freezable workqueues are still busy. %false if freezing
  * is complete.
  */