Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 148
1 file changed, 90 insertions(+), 58 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e93f7b9067d8..987293d03ebc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -16,9 +16,10 @@
  *
  * This is the generic async execution mechanism. Work items as are
  * executed in process context. The worker pool is shared and
- * automatically managed. There is one worker pool for each CPU and
- * one extra for works which are better served by workers which are
- * not bound to any specific CPU.
+ * automatically managed. There are two worker pools for each CPU (one for
+ * normal work items and the other for high priority ones) and some extra
+ * pools for workqueues which are not bound to any specific CPU - the
+ * number of these backing pools is dynamic.
  *
  * Please read Documentation/workqueue.txt for details.
  */
@@ -540,6 +541,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * This must be called either with pwq_lock held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
+ *
+ * Return: The unbound pool_workqueue for @node.
  */
 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 						  int node)
@@ -638,8 +641,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * get_work_pool - return the worker_pool a given work was associated with
  * @work: the work item of interest
  *
- * Return the worker_pool @work was last associated with. %NULL if none.
- *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
  * access under sched-RCU read lock. As such, this function should be
  * called under wq_pool_mutex or with preemption disabled.
@@ -648,6 +649,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * mentioned locking is in effect. If the returned pool needs to be used
  * beyond the critical section, the caller is responsible for ensuring the
  * returned pool is and stays online.
+ *
+ * Return: The worker_pool @work was last associated with. %NULL if none.
  */
 static struct worker_pool *get_work_pool(struct work_struct *work)
 {
@@ -671,7 +674,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
  * get_work_pool_id - return the worker pool ID a given work is associated with
  * @work: the work item of interest
  *
- * Return the worker_pool ID @work was last associated with.
+ * Return: The worker_pool ID @work was last associated with.
  * %WORK_OFFQ_POOL_NONE if none.
  */
 static int get_work_pool_id(struct work_struct *work)
@@ -830,7 +833,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  *
- * RETURNS:
+ * Return:
  * Worker task on @cpu to wake up, %NULL if none.
  */
 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@ -965,8 +968,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
  * otherwise.
  */
 static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@ -1154,14 +1157,16 @@ out_put:
  * @flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
- * stable state - idle, on timer or on worklist. Return values are
+ * stable state - idle, on timer or on worklist.
  *
+ * Return:
  * 1 if @work was pending and we successfully stole PENDING
  * 0 if @work was idle and we claimed PENDING
  * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
  * -ENOENT if someone else is canceling @work, this state may persist
  * for arbitrarily long
  *
+ * Note:
  * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
  * interrupted while holding PENDING and @work off queue, irq must be
  * disabled on entry. This, combined with delayed_work->timer being
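Inside kernel/workqueue.c, the -EAGAIN value is consumed by a busy-retry loop in the cancel paths. The following is a minimal sketch of that convention only, not the in-tree implementation; the caller name is made up and the real code also handles the -ENOENT case by flushing and retrying:

/* Sketch of the busy-retry convention described above (illustrative only). */
static bool example_cancel(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	do {
		/* On success, irqs are left disabled with state in @flags. */
		ret = try_to_grab_pending(work, false, &flags);
		/* -EAGAIN: PENDING temporarily unavailable, safe to retry. */
	} while (ret == -EAGAIN);

	if (ret < 0)			/* -ENOENT: someone else is canceling */
		return false;

	/* ret >= 0: we own PENDING with irqs disabled. */
	local_irq_restore(flags);
	return ret;			/* 1: was pending, 0: was idle */
}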
@@ -1403,10 +1408,10 @@ retry:
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
  * We queue the work to a specific CPU, the caller must ensure it
  * can't go away.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
  */
 bool queue_work_on(int cpu, struct workqueue_struct *wq,
 		   struct work_struct *work)
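As a usage illustration of the documented return value (the work item, callback, and chosen CPU here are hypothetical, not part of this patch):

#include <linux/workqueue.h>
#include <linux/printk.h>

static void my_fn(struct work_struct *w)
{
	/* runs in process context on the CPU it was queued to */
}
static DECLARE_WORK(my_work, my_fn);

static void kick(void)
{
	/* Caller guarantees CPU 1 stays online; %false means the item
	 * was already pending and was not queued a second time. */
	if (!queue_work_on(1, system_wq, &my_work))
		pr_debug("my_work already pending\n");
}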
@@ -1476,7 +1481,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns %false if @work was already on a queue, %true otherwise. If
+ * Return: %false if @work was already on a queue, %true otherwise. If
  * @delay is zero and @dwork is idle, it will be scheduled for immediate
  * execution.
  */
@@ -1512,7 +1517,7 @@ EXPORT_SYMBOL(queue_delayed_work_on);
  * zero, @work is guaranteed to be scheduled immediately regardless of its
  * current state.
  *
- * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * Return: %false if @dwork was idle and queued, %true if @dwork was
  * pending and its timer was modified.
  *
  * This function is safe to call from any context including IRQ handler.
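A hedged sketch of how these semantics are typically used via the unbound-CPU wrapper mod_delayed_work() (the delayed work item and delay are illustrative):

#include <linux/workqueue.h>
#include <linux/printk.h>

static void poll_fn(struct work_struct *w)
{
	/* periodic polling body */
}
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void rearm_poll(unsigned long delay_jiffies)
{
	/* %false: it was idle and is now queued; %true: it was already
	 * pending and only its timer was updated. Safe from IRQ context. */
	if (mod_delayed_work(system_wq, &poll_work, delay_jiffies))
		pr_debug("poll_work timer updated\n");
}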
@@ -1627,7 +1632,7 @@ static void worker_leave_idle(struct worker *worker)
  * Might sleep. Called without any lock but returns with pool->lock
  * held.
  *
- * RETURNS:
+ * Return:
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
@@ -1688,7 +1693,7 @@ static struct worker *alloc_worker(void)
  * CONTEXT:
  * Might sleep. Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * Pointer to the newly created worker.
  */
 static struct worker *create_worker(struct worker_pool *pool)
@@ -1788,6 +1793,8 @@ static void start_worker(struct worker *worker)
  * @pool: the target pool
  *
  * Grab the managership of @pool and create and start a new worker for it.
+ *
+ * Return: 0 on success. A negative error code otherwise.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
@@ -1932,7 +1939,7 @@ static void pool_mayday_timeout(unsigned long __pool)
  * multiple times. Does GFP_KERNEL allocations. Called only from
  * manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -1989,7 +1996,7 @@ restart:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Called only from manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -2032,9 +2039,12 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Does GFP_KERNEL allocations.
  *
- * RETURNS:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times. Does GFP_KERNEL allocations.
+ * Return:
+ * %false if the pool don't need management and the caller can safely start
+ * processing works, %true indicates that the function released pool->lock
+ * and reacquired it to perform some management function and that the
+ * conditions that the caller verified while holding the lock before
+ * calling the function might no longer be true.
  */
 static bool manage_workers(struct worker *worker)
 {
@@ -2255,6 +2265,8 @@ static void process_scheduled_works(struct worker *worker)
  * work items regardless of their specific target workqueue. The only
  * exception is work items which belong to workqueues with a rescuer which
  * will be explained in rescuer_thread().
+ *
+ * Return: 0
  */
 static int worker_thread(void *__worker)
 {
@@ -2353,6 +2365,8 @@ sleep:
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
+ *
+ * Return: 0
  */
 static int rescuer_thread(void *__rescuer)
 {
@@ -2525,7 +2539,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
  * CONTEXT:
  * mutex_lock(wq->mutex).
  *
- * RETURNS:
+ * Return:
  * %true if @flush_color >= 0 and there's something to flush. %false
  * otherwise.
  */
@@ -2846,7 +2860,7 @@ static bool __flush_work(struct work_struct *work)
  * Wait until @work has finished execution. @work is guaranteed to be idle
  * on return if it hasn't been requeued since flush started.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2898,7 +2912,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
  * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
  *
- * RETURNS:
+ * Return:
  * %true if @work was pending, %false otherwise.
  */
 bool cancel_work_sync(struct work_struct *work)
@@ -2915,7 +2929,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  * immediate execution. Like flush_work(), this function only
  * considers the last queueing instance of @dwork.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2933,11 +2947,15 @@ EXPORT_SYMBOL(flush_delayed_work);
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
  *
- * Kill off a pending delayed_work. Returns %true if @dwork was pending
- * and canceled; %false if wasn't pending. Note that the work callback
- * function may still be running on return, unless it returns %true and the
- * work doesn't re-arm itself. Explicitly flush or use
- * cancel_delayed_work_sync() to wait on it.
+ * Kill off a pending delayed_work.
+ *
+ * Return: %true if @dwork was pending and canceled; %false if it wasn't
+ * pending.
+ *
+ * Note:
+ * The work callback function may still be running on return, unless
+ * it returns %true and the work doesn't re-arm itself. Explicitly flush or
+ * use cancel_delayed_work_sync() to wait on it.
  *
  * This function is safe to call from any context including IRQ handler.
  */
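To illustrate the note above, a shutdown path usually cannot rely on cancel_delayed_work() alone because the callback may still be running; this sketch (driver state and names are hypothetical) shows the distinction:

#include <linux/workqueue.h>
#include <linux/printk.h>

static void my_shutdown(struct delayed_work *dwork)
{
	/* Fast path: stops a pending timer, but does not wait for a
	 * callback that is already executing. */
	if (cancel_delayed_work(dwork))
		pr_debug("timer was pending and is now canceled\n");

	/* Before freeing any state the callback touches, wait for it. */
	cancel_delayed_work_sync(dwork);
}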
@@ -2966,7 +2984,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  *
  * This is cancel_work_sync() for delayed works.
  *
- * RETURNS:
+ * Return:
  * %true if @dwork was pending, %false otherwise.
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@ -2983,7 +3001,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
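Example of the call form (the callback name is hypothetical):

#include <linux/workqueue.h>

static void flush_percpu_caches(struct work_struct *unused)
{
	/* runs once on every online CPU, in process context */
}

static int example(void)
{
	/* Blocks until all CPUs have run the function; 0 or -errno. */
	return schedule_on_each_cpu(flush_percpu_caches);
}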
@@ -3051,7 +3069,7 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * Executes the function immediately if process context is available,
  * otherwise schedules the function for delayed execution.
  *
- * Returns:	0 - function was executed
+ * Return:	0 - function was executed
  *		1 - function was scheduled for execution
  */
 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
@@ -3095,25 +3113,26 @@ static struct workqueue_struct *dev_to_wq(struct device *dev)
 	return wq_dev->wq;
 }
 
-static ssize_t wq_per_cpu_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
 {
 	struct workqueue_struct *wq = dev_to_wq(dev);
 
 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
 }
+static DEVICE_ATTR_RO(per_cpu);
 
-static ssize_t wq_max_active_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
+static ssize_t max_active_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
 {
 	struct workqueue_struct *wq = dev_to_wq(dev);
 
 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
 }
 
-static ssize_t wq_max_active_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t count)
+static ssize_t max_active_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
 {
 	struct workqueue_struct *wq = dev_to_wq(dev);
 	int val;
@@ -3124,12 +3143,14 @@ static ssize_t wq_max_active_store(struct device *dev,
 	workqueue_set_max_active(wq, val);
 	return count;
 }
+static DEVICE_ATTR_RW(max_active);
 
-static struct device_attribute wq_sysfs_attrs[] = {
-	__ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
-	__ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
-	__ATTR_NULL,
+static struct attribute *wq_sysfs_attrs[] = {
+	&dev_attr_per_cpu.attr,
+	&dev_attr_max_active.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(wq_sysfs);
 
 static ssize_t wq_pool_ids_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
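The two hunks above follow the generic device-attribute pattern: DEVICE_ATTR_RO()/DEVICE_ATTR_RW() expect callbacks named <name>_show/<name>_store and define dev_attr_<name>, and ATTRIBUTE_GROUPS(<prefix>) turns a NULL-terminated <prefix>_attrs[] array into <prefix>_groups[] suitable for a .dev_groups pointer. A minimal standalone sketch of the same idiom (the names here are illustrative, not from this patch):

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>

static ssize_t level_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR_RO(level);		/* defines dev_attr_level */

static struct attribute *demo_attrs[] = {
	&dev_attr_level.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* defines demo_groups */

/* demo_groups can then be assigned to a bus_type's or class's
 * .dev_groups field, as the wq_subsys change in the next hunk does. */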
@@ -3279,7 +3300,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = {
 
 static struct bus_type wq_subsys = {
 	.name				= "workqueue",
-	.dev_attrs			= wq_sysfs_attrs,
+	.dev_groups			= wq_sysfs_groups,
 };
 
 static int __init wq_sysfs_init(void)
@@ -3308,7 +3329,7 @@ static void wq_device_release(struct device *dev)
  * apply_workqueue_attrs() may race against userland updating the
  * attributes.
  *
- * Returns 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int workqueue_sysfs_register(struct workqueue_struct *wq)
 {
@@ -3401,7 +3422,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
  * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
- * return it. Returns NULL on failure.
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
 {
@@ -3460,7 +3483,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  * @pool: worker_pool to initialize
  *
  * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs.
- * Returns 0 on success, -errno on failure. Even on failure, all fields
+ *
+ * Return: 0 on success, -errno on failure. Even on failure, all fields
  * inside @pool proper are initialized and put_unbound_pool() can be called
  * on @pool safely to release it.
  */
@@ -3567,9 +3591,12 @@ static void put_unbound_pool(struct worker_pool *pool)
  * Obtain a worker_pool which has the same attributes as @attrs, bump the
  * reference count and return it. If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
- * create a new one. On failure, returns NULL.
+ * create a new one.
  *
  * Should be called with wq_pool_mutex held.
+ *
+ * Return: On success, a worker_pool with the same attributes as @attrs.
+ * On failure, %NULL.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
@@ -3805,9 +3832,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * Calculate the cpumask a workqueue with @attrs should use on @node. If
  * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation. The result is stored in @cpumask. This function returns
- * %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * calculation. The result is stored in @cpumask.
  *
  * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
  * enabled and @node has online CPUs requested by @attrs, the returned
@@ -3816,6 +3841,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * The caller is responsible for ensuring that the cpumask of @node stays
  * stable.
+ *
+ * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
  */
 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 				 int cpu_going_down, cpumask_t *cpumask)
@@ -3869,8 +3897,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
  * items finish. Note that a work item which repeatedly requeues itself
  * back-to-back will stay on its current pwq.
  *
- * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
- * failure.
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
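A hedged sketch of the intended calling sequence, assuming an unbound workqueue; the helper name and the choice of restricting to node 0's CPUs are made up for illustration:

#include <linux/workqueue.h>
#include <linux/topology.h>
#include <linux/cpumask.h>

static int pin_wq_to_node0(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	cpumask_copy(attrs->cpumask, cpumask_of_node(0));
	ret = apply_workqueue_attrs(wq, attrs);	/* 0 or -errno */
	free_workqueue_attrs(attrs);
	return ret;
}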
@@ -4338,6 +4367,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  *
  * Determine whether %current is a workqueue rescuer. Can be used from
  * work functions to determine whether it's being run off the rescuer task.
+ *
+ * Return: %true if %current is a workqueue rescuer. %false otherwise.
  */
 bool current_is_workqueue_rescuer(void)
 {
@@ -4361,7 +4392,7 @@ bool current_is_workqueue_rescuer(void)
  * workqueue being congested on one CPU doesn't mean the workqueue is also
  * contested on other CPUs / NUMA nodes.
  *
- * RETURNS:
+ * Return:
  * %true if congested, %false otherwise.
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4394,7 +4425,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
- * RETURNS:
+ * Return:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
@@ -4772,9 +4803,10 @@ static void work_for_cpu_fn(struct work_struct *work)
  * @fn: the function to run
  * @arg: the function arg
  *
- * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
  */
 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
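Usage sketch (the callback and its body are hypothetical):

#include <linux/workqueue.h>

static long read_state_fn(void *arg)
{
	/* executes in process context on the requested CPU */
	return 0;
}

static long example_work_on_cpu(int cpu)
{
	/* Caller keeps @cpu online and holds no locks the callback might
	 * need; the return value is whatever read_state_fn() returned. */
	return work_on_cpu(cpu, read_state_fn, NULL);
}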
@@ -4846,7 +4878,7 @@ void freeze_workqueues_begin(void)
  * CONTEXT:
  * Grabs and releases wq_pool_mutex.
  *
- * RETURNS:
+ * Return:
  * %true if some freezable workqueues are still busy. %false if freezing
  * is complete.
  */