Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 157 |
1 file changed, 99 insertions(+), 58 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f5d4be22034..987293d03ebc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -16,9 +16,10 @@
  *
  * This is the generic async execution mechanism. Work items as are
  * executed in process context. The worker pool is shared and
- * automatically managed. There is one worker pool for each CPU and
- * one extra for works which are better served by workers which are
- * not bound to any specific CPU.
+ * automatically managed. There are two worker pools for each CPU (one for
+ * normal work items and the other for high priority ones) and some extra
+ * pools for workqueues which are not bound to any specific CPU - the
+ * number of these backing pools is dynamic.
  *
  * Please read Documentation/workqueue.txt for details.
  */
@@ -540,6 +541,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * This must be called either with pwq_lock held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
+ *
+ * Return: The unbound pool_workqueue for @node.
  */
 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 						  int node)
@@ -638,8 +641,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * get_work_pool - return the worker_pool a given work was associated with
  * @work: the work item of interest
  *
- * Return the worker_pool @work was last associated with. %NULL if none.
- *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
  * access under sched-RCU read lock. As such, this function should be
  * called under wq_pool_mutex or with preemption disabled.
@@ -648,6 +649,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * mentioned locking is in effect. If the returned pool needs to be used
  * beyond the critical section, the caller is responsible for ensuring the
  * returned pool is and stays online.
+ *
+ * Return: The worker_pool @work was last associated with. %NULL if none.
  */
 static struct worker_pool *get_work_pool(struct work_struct *work)
 {
@@ -671,7 +674,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
  * get_work_pool_id - return the worker pool ID a given work is associated with
  * @work: the work item of interest
  *
- * Return the worker_pool ID @work was last associated with.
+ * Return: The worker_pool ID @work was last associated with.
  * %WORK_OFFQ_POOL_NONE if none.
  */
 static int get_work_pool_id(struct work_struct *work)
@@ -830,7 +833,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  *
- * RETURNS:
+ * Return:
  * Worker task on @cpu to wake up, %NULL if none.
  */
 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@ -965,8 +968,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
  * otherwise.
  */
 static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@ -1154,14 +1157,16 @@ out_put:
  * @flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
- * stable state - idle, on timer or on worklist. Return values are
+ * stable state - idle, on timer or on worklist.
  *
+ * Return:
  *  1 if @work was pending and we successfully stole PENDING
  *  0 if @work was idle and we claimed PENDING
  *  -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
  *  -ENOENT if someone else is canceling @work, this state may persist
  *		for arbitrarily long
  *
+ * Note:
  * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
  * interrupted while holding PENDING and @work off queue, irq must be
  * disabled on entry. This, combined with delayed_work->timer being
@@ -1403,10 +1408,10 @@ retry:
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
  * We queue the work to a specific CPU, the caller must ensure it
  * can't go away.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
  */
 bool queue_work_on(int cpu, struct workqueue_struct *wq,
 		   struct work_struct *work)
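As context for the Return: annotation above, here is a minimal sketch of how a caller might act on queue_work_on()'s return value. The work item, handler, and CPU number are hypothetical, chosen only for illustration:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical work item and handler, for illustration only. */
static void example_fn(struct work_struct *work)
{
	pr_info("example work ran on CPU %d\n", smp_processor_id());
}
static DECLARE_WORK(example_work, example_fn);

static void example_queue(void)
{
	/*
	 * %true: the item was idle and is now queued on CPU 1's pool.
	 * %false: it was already pending somewhere, so this call is a no-op.
	 */
	if (!queue_work_on(1, system_wq, &example_work))
		pr_debug("example_work was already pending\n");
}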
@@ -1476,7 +1481,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns %false if @work was already on a queue, %true otherwise. If
+ * Return: %false if @work was already on a queue, %true otherwise. If
  * @delay is zero and @dwork is idle, it will be scheduled for immediate
  * execution.
  */
@@ -1512,7 +1517,7 @@ EXPORT_SYMBOL(queue_delayed_work_on);
  * zero, @work is guaranteed to be scheduled immediately regardless of its
  * current state.
  *
- * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * Return: %false if @dwork was idle and queued, %true if @dwork was
  * pending and its timer was modified.
  *
  * This function is safe to call from any context including IRQ handler.
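A hedged sketch of the two return conventions documented above, using a hypothetical polling delayed work on the system workqueue:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical poller, for illustration only. */
static void poll_fn(struct work_struct *work)
{
	/* ... poll some hypothetical device state ... */
}
static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

static void example_rearm(void)
{
	/* %false here would mean poll_dwork was already pending. */
	queue_delayed_work(system_wq, &poll_dwork, HZ);

	/*
	 * mod_delayed_work() inverts the sense: %false means the work was
	 * idle and has just been queued, %true means it was already pending
	 * and only its timer was pushed out to the new expiry.
	 */
	if (mod_delayed_work(system_wq, &poll_dwork, 2 * HZ))
		pr_debug("poll_dwork timer was modified\n");
}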
@@ -1627,7 +1632,7 @@ static void worker_leave_idle(struct worker *worker)
  * Might sleep. Called without any lock but returns with pool->lock
  * held.
  *
- * RETURNS:
+ * Return:
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
@@ -1688,7 +1693,7 @@ static struct worker *alloc_worker(void)
  * CONTEXT:
  * Might sleep. Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * Pointer to the newly created worker.
  */
 static struct worker *create_worker(struct worker_pool *pool)
@@ -1788,6 +1793,8 @@ static void start_worker(struct worker *worker)
  * @pool: the target pool
  *
  * Grab the managership of @pool and create and start a new worker for it.
+ *
+ * Return: 0 on success. A negative error code otherwise.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
@@ -1932,7 +1939,7 @@ static void pool_mayday_timeout(unsigned long __pool)
  * multiple times. Does GFP_KERNEL allocations. Called only from
  * manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -1989,7 +1996,7 @@ restart:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Called only from manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -2032,9 +2039,12 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times. Does GFP_KERNEL allocations.
  *
- * RETURNS:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times. Does GFP_KERNEL allocations.
+ * Return:
+ * %false if the pool don't need management and the caller can safely start
+ * processing works, %true indicates that the function released pool->lock
+ * and reacquired it to perform some management function and that the
+ * conditions that the caller verified while holding the lock before
+ * calling the function might no longer be true.
  */
 static bool manage_workers(struct worker *worker)
 {
@@ -2201,6 +2211,15 @@ __acquires(&pool->lock)
 		dump_stack();
 	}
 
+	/*
+	 * The following prevents a kworker from hogging CPU on !PREEMPT
+	 * kernels, where a requeueing work item waiting for something to
+	 * happen could deadlock with stop_machine as such work item could
+	 * indefinitely requeue itself while all other CPUs are trapped in
+	 * stop_machine.
+	 */
+	cond_resched();
+
 	spin_lock_irq(&pool->lock);
 
 	/* clear cpu intensive status */
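The comment added above describes the !PREEMPT hazard in the abstract; the following is only an illustration of the kind of self-requeueing work item it guards against, with a hypothetical predicate standing in for whatever condition such an item waits on:

/*
 * Illustration only: a work item that requeues itself until some
 * hypothetical condition becomes true. On a !PREEMPT kernel, without the
 * cond_resched() in process_one_work(), a worker running a stream of such
 * items back-to-back could keep its CPU out of the scheduler while
 * stop_machine() waits for every CPU to respond.
 */
static void retry_fn(struct work_struct *work)
{
	if (!example_condition_met())	/* hypothetical predicate */
		queue_work(system_wq, work);
}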
@@ -2246,6 +2265,8 @@ static void process_scheduled_works(struct worker *worker)
  * work items regardless of their specific target workqueue. The only
  * exception is work items which belong to workqueues with a rescuer which
  * will be explained in rescuer_thread().
+ *
+ * Return: 0
  */
 static int worker_thread(void *__worker)
 {
@@ -2344,6 +2365,8 @@ sleep:
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
+ *
+ * Return: 0
  */
 static int rescuer_thread(void *__rescuer)
 {
@@ -2516,7 +2539,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
  * CONTEXT:
  * mutex_lock(wq->mutex).
  *
- * RETURNS:
+ * Return:
  * %true if @flush_color >= 0 and there's something to flush. %false
  * otherwise.
  */
@@ -2837,7 +2860,7 @@ static bool __flush_work(struct work_struct *work)
  * Wait until @work has finished execution. @work is guaranteed to be idle
  * on return if it hasn't been requeued since flush started.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2889,7 +2912,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
  * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
  *
- * RETURNS:
+ * Return:
  * %true if @work was pending, %false otherwise.
  */
 bool cancel_work_sync(struct work_struct *work)
@@ -2906,7 +2929,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  * immediate execution. Like flush_work(), this function only
  * considers the last queueing instance of @dwork.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2924,11 +2947,15 @@ EXPORT_SYMBOL(flush_delayed_work);
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
  *
- * Kill off a pending delayed_work. Returns %true if @dwork was pending
- * and canceled; %false if wasn't pending. Note that the work callback
- * function may still be running on return, unless it returns %true and the
- * work doesn't re-arm itself. Explicitly flush or use
- * cancel_delayed_work_sync() to wait on it.
+ * Kill off a pending delayed_work.
+ *
+ * Return: %true if @dwork was pending and canceled; %false if it wasn't
+ * pending.
+ *
+ * Note:
+ * The work callback function may still be running on return, unless
+ * it returns %true and the work doesn't re-arm itself. Explicitly flush or
+ * use cancel_delayed_work_sync() to wait on it.
  *
  * This function is safe to call from any context including IRQ handler.
  */
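A hedged sketch of the distinction the reworded comment draws between the non-sleeping and the synchronous cancel, using a hypothetical heartbeat work item in a driver:

#include <linux/workqueue.h>

/* Hypothetical delayed work owned by a driver, for illustration. */
static struct delayed_work hb_dwork;

static void example_stop_fast(void)
{
	/*
	 * Non-sleeping cancel: safe from IRQ context, but the callback may
	 * still be running (or may re-arm itself) when this returns.
	 */
	if (cancel_delayed_work(&hb_dwork))
		pr_debug("heartbeat was pending and is now cancelled\n");
}

static void example_teardown(void)
{
	/* Sleeping cancel: guarantees the callback has finished. */
	cancel_delayed_work_sync(&hb_dwork);
}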
@@ -2957,7 +2984,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  *
  * This is cancel_work_sync() for delayed works.
  *
- * RETURNS:
+ * Return:
  * %true if @dwork was pending, %false otherwise.
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@ -2974,7 +3001,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
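A minimal sketch of a schedule_on_each_cpu() caller; the handler name and its purpose are hypothetical:

#include <linux/workqueue.h>

static void flush_local_stats(struct work_struct *unused)
{
	/* Runs once on every online CPU, in that CPU's worker. */
}

static int example_flush_all(void)
{
	/* Blocks until every CPU has run flush_local_stats(); 0 or -errno. */
	return schedule_on_each_cpu(flush_local_stats);
}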
@@ -3042,7 +3069,7 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * Executes the function immediately if process context is available,
  * otherwise schedules the function for delayed execution.
  *
- * Returns:	0 - function was executed
+ * Return:	0 - function was executed
  *		1 - function was scheduled for execution
  */
 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
@@ -3086,25 +3113,26 @@ static struct workqueue_struct *dev_to_wq(struct device *dev)
 	return wq_dev->wq;
 }
 
-static ssize_t wq_per_cpu_show(struct device *dev,
-			       struct device_attribute *attr, char *buf)
+static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
 {
 	struct workqueue_struct *wq = dev_to_wq(dev);
 
 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
 }
+static DEVICE_ATTR_RO(per_cpu);
 
-static ssize_t wq_max_active_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
+static ssize_t max_active_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
 {
 	struct workqueue_struct *wq = dev_to_wq(dev);
 
 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
 }
 
-static ssize_t wq_max_active_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t count)
+static ssize_t max_active_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
 {
 	struct workqueue_struct *wq = dev_to_wq(dev);
 	int val;
@@ -3115,12 +3143,14 @@ static ssize_t wq_max_active_store(struct device *dev,
 	workqueue_set_max_active(wq, val);
 	return count;
 }
+static DEVICE_ATTR_RW(max_active);
 
-static struct device_attribute wq_sysfs_attrs[] = {
-	__ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
-	__ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
-	__ATTR_NULL,
+static struct attribute *wq_sysfs_attrs[] = {
+	&dev_attr_per_cpu.attr,
+	&dev_attr_max_active.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(wq_sysfs);
 
 static ssize_t wq_pool_ids_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
@@ -3270,7 +3300,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = {
 
 static struct bus_type wq_subsys = {
 	.name				= "workqueue",
-	.dev_attrs			= wq_sysfs_attrs,
+	.dev_groups			= wq_sysfs_groups,
 };
 
 static int __init wq_sysfs_init(void)
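The two hunks above convert the workqueue bus attributes from the legacy dev_attrs array to DEVICE_ATTR_RO()/DEVICE_ATTR_RW() plus ATTRIBUTE_GROUPS() and .dev_groups. A generic sketch of that pattern, using a hypothetical bus and attribute unrelated to workqueues:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute following the same pattern. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(example);		/* defines dev_attr_example */

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* defines example_groups */

static struct bus_type example_subsys = {
	.name		= "example",
	.dev_groups	= example_groups,	/* replaces the old .dev_attrs */
};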
@@ -3299,7 +3329,7 @@ static void wq_device_release(struct device *dev)
  * apply_workqueue_attrs() may race against userland updating the
  * attributes.
  *
- * Returns 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int workqueue_sysfs_register(struct workqueue_struct *wq)
 {
@@ -3392,7 +3422,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
  * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
- * return it. Returns NULL on failure.
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
 {
@@ -3451,7 +3483,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  * @pool: worker_pool to initialize
  *
  * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs.
- * Returns 0 on success, -errno on failure. Even on failure, all fields
+ *
+ * Return: 0 on success, -errno on failure. Even on failure, all fields
  * inside @pool proper are initialized and put_unbound_pool() can be called
  * on @pool safely to release it.
  */
@@ -3558,9 +3591,12 @@ static void put_unbound_pool(struct worker_pool *pool)
  * Obtain a worker_pool which has the same attributes as @attrs, bump the
  * reference count and return it. If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
- * create a new one. On failure, returns NULL.
+ * create a new one.
  *
  * Should be called with wq_pool_mutex held.
+ *
+ * Return: On success, a worker_pool with the same attributes as @attrs.
+ * On failure, %NULL.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
@@ -3796,9 +3832,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * Calculate the cpumask a workqueue with @attrs should use on @node. If
  * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation. The result is stored in @cpumask. This function returns
- * %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * calculation. The result is stored in @cpumask.
  *
  * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
  * enabled and @node has online CPUs requested by @attrs, the returned
@@ -3807,6 +3841,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * The caller is responsible for ensuring that the cpumask of @node stays
  * stable.
+ *
+ * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
  */
 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 				 int cpu_going_down, cpumask_t *cpumask)
@@ -3860,8 +3897,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
  * items finish. Note that a work item which repeatedly requeues itself
  * back-to-back will stay on its current pwq.
  *
- * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
- * failure.
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
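A hedged sketch of the alloc/apply/free cycle around apply_workqueue_attrs(), assuming @wq is an unbound workqueue; the function name, nice value, and CPU set are hypothetical:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>

/* Hypothetical: pin an unbound workqueue's workers to CPUs 0-3 at nice -5. */
static int example_tune_wq(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;
	cpumask_clear(attrs->cpumask);
	cpumask_set_cpu(0, attrs->cpumask);
	cpumask_set_cpu(1, attrs->cpumask);
	cpumask_set_cpu(2, attrs->cpumask);
	cpumask_set_cpu(3, attrs->cpumask);

	ret = apply_workqueue_attrs(wq, attrs);	/* 0 on success, -errno on failure */
	free_workqueue_attrs(attrs);
	return ret;
}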
@@ -4329,6 +4367,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  *
  * Determine whether %current is a workqueue rescuer. Can be used from
  * work functions to determine whether it's being run off the rescuer task.
+ *
+ * Return: %true if %current is a workqueue rescuer. %false otherwise.
  */
 bool current_is_workqueue_rescuer(void)
 {
@@ -4352,7 +4392,7 @@ bool current_is_workqueue_rescuer(void)
  * workqueue being congested on one CPU doesn't mean the workqueue is also
  * contested on other CPUs / NUMA nodes.
  *
- * RETURNS:
+ * Return:
  * %true if congested, %false otherwise.
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4385,7 +4425,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
- * RETURNS:
+ * Return:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
@@ -4763,9 +4803,10 @@ static void work_for_cpu_fn(struct work_struct *work)
  * @fn: the function to run
  * @arg: the function arg
  *
- * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
  */
 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
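A minimal sketch of a work_on_cpu() caller matching the Return: wording above; the helper and the per-CPU value it reads are hypothetical:

#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical: compute something that must be read on a specific CPU. */
static long read_node_setting(void *arg)
{
	int *setting = arg;

	return *setting + smp_processor_id();	/* runs on the target CPU */
}

static long example_query_cpu(int cpu, int *setting)
{
	/* Sleeps; the caller must keep @cpu online, e.g. via get_online_cpus(). */
	return work_on_cpu(cpu, read_node_setting, setting);
}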
@@ -4837,7 +4878,7 @@ void freeze_workqueues_begin(void)
  * CONTEXT:
  * Grabs and releases wq_pool_mutex.
  *
- * RETURNS:
+ * Return:
  * %true if some freezable workqueues are still busy. %false if freezing
  * is complete.
  */
