| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-06 12:36:28 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-06 12:36:28 -0400 |
| commit | 2e515bf096c245ba87f20ab4b4ea20f911afaeda (patch) | |
| tree | 8ce40f811092844ea9da683804db6e2afa410808 /kernel/workqueue.c | |
| parent | 22e04f6b4b04a8afe9af9239224591d06ba3b24d (diff) | |
| parent | f8ea61e63442c25cbe6ddee48979b444f1f2a01c (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
Pull trivial tree from Jiri Kosina:
"The usual trivial updates all over the tree -- mostly typo fixes and
documentation updates"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (52 commits)
doc: Documentation/cputopology.txt fix typo
treewide: Convert retrun typos to return
Fix comment typo for init_cma_reserved_pageblock
Documentation/trace: Correcting and extending tracepoint documentation
mm/hotplug: fix a typo in Documentation/memory-hotplug.txt
power: Documentation: Update s2ram link
doc: fix a typo in Documentation/00-INDEX
Documentation/printk-formats.txt: No casts needed for u64/s64
doc: Fix typo "is is" in Documentations
treewide: Fix printks with 0x%#
zram: doc fixes
Documentation/kmemcheck: update kmemcheck documentation
doc: documentation/hwspinlock.txt fix typo
PM / Hibernate: add section for resume options
doc: filesystems : Fix typo in Documentations/filesystems
scsi/megaraid fixed several typos in comments
ppc: init_32: Fix error typo "CONFIG_START_KERNEL"
treewide: Add __GFP_NOWARN to k.alloc calls with v.alloc fallbacks
page_isolation: Fix a comment typo in test_pages_isolated()
doc: fix a typo about irq affinity
...
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 107 |
1 file changed, 66 insertions, 41 deletions
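Nearly every workqueue.c hunk below makes the same kind of change: free-form "RETURNS:" / "Returns ..." notes in the kernel-doc comments are rewritten as the structured "Return:" (and occasionally "Note:") sections that the kernel-doc tooling recognizes. A minimal sketch of the resulting comment layout is shown here; struct widget and frob_widget() are invented purely for illustration and do not appear in the patch.

```c
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical example type, not part of the patch. */
struct widget {
	bool initialized;
	unsigned int frob_count;
};

/**
 * frob_widget - frob a widget once
 * @w: widget of interest
 *
 * Bump the frob count of @w.  The caller must ensure @w stays valid for
 * the duration of the call.
 *
 * Return:
 * 0 on success.  -EINVAL if @w has not been initialized yet.
 */
static int frob_widget(struct widget *w)
{
	if (!w->initialized)
		return -EINVAL;
	w->frob_count++;
	return 0;
}
```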
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29b79852a845..987293d03ebc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -541,6 +541,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * This must be called either with pwq_lock held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
+ *
+ * Return: The unbound pool_workqueue for @node.
  */
 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 						  int node)
@@ -639,8 +641,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * get_work_pool - return the worker_pool a given work was associated with
  * @work: the work item of interest
  *
- * Return the worker_pool @work was last associated with. %NULL if none.
- *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
  * access under sched-RCU read lock. As such, this function should be
  * called under wq_pool_mutex or with preemption disabled.
@@ -649,6 +649,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * mentioned locking is in effect. If the returned pool needs to be used
  * beyond the critical section, the caller is responsible for ensuring the
  * returned pool is and stays online.
+ *
+ * Return: The worker_pool @work was last associated with. %NULL if none.
  */
 static struct worker_pool *get_work_pool(struct work_struct *work)
 {
@@ -672,7 +674,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
  * get_work_pool_id - return the worker pool ID a given work is associated with
  * @work: the work item of interest
  *
- * Return the worker_pool ID @work was last associated with.
+ * Return: The worker_pool ID @work was last associated with.
  * %WORK_OFFQ_POOL_NONE if none.
  */
 static int get_work_pool_id(struct work_struct *work)
@@ -831,7 +833,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  *
- * RETURNS:
+ * Return:
  * Worker task on @cpu to wake up, %NULL if none.
  */
 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@ -966,8 +968,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
  * otherwise.
  */
 static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@ -1155,14 +1157,16 @@ out_put:
  * @flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
- * stable state - idle, on timer or on worklist. Return values are
+ * stable state - idle, on timer or on worklist.
  *
+ * Return:
  * 1 if @work was pending and we successfully stole PENDING
  * 0 if @work was idle and we claimed PENDING
  * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
  * -ENOENT if someone else is canceling @work, this state may persist
  * for arbitrarily long
  *
+ * Note:
  * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
  * interrupted while holding PENDING and @work off queue, irq must be
  * disabled on entry. This, combined with delayed_work->timer being
@@ -1404,10 +1408,10 @@ retry:
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
  * We queue the work to a specific CPU, the caller must ensure it
  * can't go away.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
  */
 bool queue_work_on(int cpu, struct workqueue_struct *wq,
 		   struct work_struct *work)
@@ -1477,7 +1481,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns %false if @work was already on a queue, %true otherwise. If
+ * Return: %false if @work was already on a queue, %true otherwise. If
  * @delay is zero and @dwork is idle, it will be scheduled for immediate
  * execution.
  */
@@ -1513,7 +1517,7 @@ EXPORT_SYMBOL(queue_delayed_work_on);
  * zero, @work is guaranteed to be scheduled immediately regardless of its
  * current state.
  *
- * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * Return: %false if @dwork was idle and queued, %true if @dwork was
  * pending and its timer was modified.
  *
  * This function is safe to call from any context including IRQ handler.
@@ -1628,7 +1632,7 @@ static void worker_leave_idle(struct worker *worker)
  * Might sleep. Called without any lock but returns with pool->lock
  * held.
  *
- * RETURNS:
+ * Return:
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
@@ -1689,7 +1693,7 @@ static struct worker *alloc_worker(void)
  * CONTEXT:
  * Might sleep. Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * Pointer to the newly created worker.
  */
 static struct worker *create_worker(struct worker_pool *pool)
@@ -1789,6 +1793,8 @@ static void start_worker(struct worker *worker)
  * @pool: the target pool
  *
  * Grab the managership of @pool and create and start a new worker for it.
+ *
+ * Return: 0 on success. A negative error code otherwise.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
@@ -1933,7 +1939,7 @@ static void pool_mayday_timeout(unsigned long __pool)
  * multiple times. Does GFP_KERNEL allocations. Called only from
  * manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -1990,7 +1996,7 @@ restart:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Called only from manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -2033,7 +2039,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * %false if the pool don't need management and the caller can safely start
  * processing works, %true indicates that the function released pool->lock
  * and reacquired it to perform some management function and that the
@@ -2259,6 +2265,8 @@ static void process_scheduled_works(struct worker *worker)
  * work items regardless of their specific target workqueue. The only
  * exception is work items which belong to workqueues with a rescuer which
  * will be explained in rescuer_thread().
+ *
+ * Return: 0
  */
 static int worker_thread(void *__worker)
 {
@@ -2357,6 +2365,8 @@ sleep:
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
+ *
+ * Return: 0
  */
 static int rescuer_thread(void *__rescuer)
 {
@@ -2529,7 +2539,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
  * CONTEXT:
  * mutex_lock(wq->mutex).
  *
- * RETURNS:
+ * Return:
  * %true if @flush_color >= 0 and there's something to flush. %false
  * otherwise.
  */
@@ -2850,7 +2860,7 @@ static bool __flush_work(struct work_struct *work)
  * Wait until @work has finished execution. @work is guaranteed to be idle
  * on return if it hasn't been requeued since flush started.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2902,7 +2912,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
  * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
  *
- * RETURNS:
+ * Return:
  * %true if @work was pending, %false otherwise.
  */
 bool cancel_work_sync(struct work_struct *work)
@@ -2919,7 +2929,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  * immediate execution. Like flush_work(), this function only
  * considers the last queueing instance of @dwork.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2937,11 +2947,15 @@ EXPORT_SYMBOL(flush_delayed_work);
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
  *
- * Kill off a pending delayed_work. Returns %true if @dwork was pending
- * and canceled; %false if wasn't pending. Note that the work callback
- * function may still be running on return, unless it returns %true and the
- * work doesn't re-arm itself. Explicitly flush or use
- * cancel_delayed_work_sync() to wait on it.
+ * Kill off a pending delayed_work.
+ *
+ * Return: %true if @dwork was pending and canceled; %false if it wasn't
+ * pending.
+ *
+ * Note:
+ * The work callback function may still be running on return, unless
+ * it returns %true and the work doesn't re-arm itself. Explicitly flush or
+ * use cancel_delayed_work_sync() to wait on it.
  *
  * This function is safe to call from any context including IRQ handler.
  */
@@ -2970,7 +2984,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  *
  * This is cancel_work_sync() for delayed works.
  *
- * RETURNS:
+ * Return:
  * %true if @dwork was pending, %false otherwise.
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@ -2987,7 +3001,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -3055,7 +3069,7 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * Executes the function immediately if process context is available,
  * otherwise schedules the function for delayed execution.
  *
- * Returns: 0 - function was executed
+ * Return: 0 - function was executed
  * 1 - function was scheduled for execution
  */
 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
@@ -3315,7 +3329,7 @@ static void wq_device_release(struct device *dev)
  * apply_workqueue_attrs() may race against userland updating the
  * attributes.
  *
- * Returns 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int workqueue_sysfs_register(struct workqueue_struct *wq)
 {
@@ -3408,7 +3422,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
  * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
- * return it. Returns NULL on failure.
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
 {
@@ -3467,7 +3483,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  * @pool: worker_pool to initialize
  *
  * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs.
- * Returns 0 on success, -errno on failure. Even on failure, all fields
+ *
+ * Return: 0 on success, -errno on failure. Even on failure, all fields
  * inside @pool proper are initialized and put_unbound_pool() can be called
  * on @pool safely to release it.
  */
@@ -3574,9 +3591,12 @@ static void put_unbound_pool(struct worker_pool *pool)
  * Obtain a worker_pool which has the same attributes as @attrs, bump the
  * reference count and return it. If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
- * create a new one. On failure, returns NULL.
+ * create a new one.
  *
  * Should be called with wq_pool_mutex held.
+ *
+ * Return: On success, a worker_pool with the same attributes as @attrs.
+ * On failure, %NULL.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
@@ -3812,9 +3832,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * Calculate the cpumask a workqueue with @attrs should use on @node. If
  * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation. The result is stored in @cpumask. This function returns
- * %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * calculation. The result is stored in @cpumask.
  *
  * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
  * enabled and @node has online CPUs requested by @attrs, the returned
@@ -3823,6 +3841,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * The caller is responsible for ensuring that the cpumask of @node stays
  * stable.
+ *
+ * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
  */
 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 				 int cpu_going_down, cpumask_t *cpumask)
@@ -3876,8 +3897,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
  * items finish. Note that a work item which repeatedly requeues itself
  * back-to-back will stay on its current pwq.
  *
- * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
- * failure.
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
@@ -4345,6 +4367,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  *
  * Determine whether %current is a workqueue rescuer. Can be used from
  * work functions to determine whether it's being run off the rescuer task.
+ *
+ * Return: %true if %current is a workqueue rescuer. %false otherwise.
  */
 bool current_is_workqueue_rescuer(void)
 {
@@ -4368,7 +4392,7 @@ bool current_is_workqueue_rescuer(void)
  * workqueue being congested on one CPU doesn't mean the workqueue is also
  * contested on other CPUs / NUMA nodes.
  *
- * RETURNS:
+ * Return:
  * %true if congested, %false otherwise.
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4401,7 +4425,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
- * RETURNS:
+ * Return:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
@@ -4779,9 +4803,10 @@ static void work_for_cpu_fn(struct work_struct *work)
  * @fn: the function to run
  * @arg: the function arg
  *
- * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
  */
 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -4853,7 +4878,7 @@ void freeze_workqueues_begin(void)
  * CONTEXT:
  * Grabs and releases wq_pool_mutex.
  *
- * RETURNS:
+ * Return:
  * %true if some freezable workqueues are still busy. %false if freezing
  * is complete.
  */
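Taken together, the reworded comments spell out the return-value contracts of the queueing and cancellation helpers touched above (queue_work_on(), queue_delayed_work_on(), cancel_delayed_work(), cancel_delayed_work_sync(), flush_work() and friends). A small usage sketch under those documented contracts; the poll_* names and the 100 ms period are made up for illustration:

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void poll_fn(struct work_struct *work)
{
	/* Runs in process context on a system_wq worker thread. */
	pr_info("poll tick\n");
}

static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

static void poll_start(void)
{
	/*
	 * queue_delayed_work() wraps queue_delayed_work_on(): %false if
	 * @dwork was already on a queue, %true otherwise.
	 */
	if (!queue_delayed_work(system_wq, &poll_dwork, msecs_to_jiffies(100)))
		pr_info("poll was already pending\n");
}

static void poll_stop(void)
{
	/*
	 * Per cancel_delayed_work_sync()'s comment: %true if @dwork was
	 * pending, %false otherwise. Unlike cancel_delayed_work(), this
	 * also waits for a running callback to finish.
	 */
	if (cancel_delayed_work_sync(&poll_dwork))
		pr_info("cancelled a pending poll\n");
}
```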