| author | Tejun Heo <tj@kernel.org> | 2013-03-13 19:51:36 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-03-13 19:51:36 -0400 |
| commit | c5aa87bbf4b23f5e4f167489406daeb0ed275c47 | |
| tree | c0a8218033a0dd61fb840cbd300287405043ba70 /kernel/workqueue.c | |
| parent | 983ca25e738ee0c9c5435a503a6bb0034d4552b0 | |
workqueue: update comments and a warning message

* Update incorrect and add missing synchronization labels.

* Update incorrect or misleading comments. Add new comments where clarification is necessary. Reformat / rephrase some comments.

* drain_workqueue() can be used separately from destroy_workqueue() but its warning message was incorrectly referring to destruction.

Other than the warning message change, this patch doesn't make any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 84 |
1 file changed, 44 insertions, 40 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 756761480a1a..248a1e95b577 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
| @@ -145,7 +145,7 @@ struct worker_pool { | |||
| 145 | struct timer_list idle_timer; /* L: worker idle timeout */ | 145 | struct timer_list idle_timer; /* L: worker idle timeout */ |
| 146 | struct timer_list mayday_timer; /* L: SOS timer for workers */ | 146 | struct timer_list mayday_timer; /* L: SOS timer for workers */ |
| 147 | 147 | ||
| 148 | /* workers are chained either in busy_hash or idle_list */ | 148 | /* a workers is either on busy_hash or idle_list, or the manager */ |
| 149 | DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); | 149 | DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); |
| 150 | /* L: hash of busy workers */ | 150 | /* L: hash of busy workers */ |
| 151 | 151 | ||
| @@ -154,8 +154,8 @@ struct worker_pool { | |||
| 154 | struct ida worker_ida; /* L: for worker IDs */ | 154 | struct ida worker_ida; /* L: for worker IDs */ |
| 155 | 155 | ||
| 156 | struct workqueue_attrs *attrs; /* I: worker attributes */ | 156 | struct workqueue_attrs *attrs; /* I: worker attributes */ |
| 157 | struct hlist_node hash_node; /* R: unbound_pool_hash node */ | 157 | struct hlist_node hash_node; /* W: unbound_pool_hash node */ |
| 158 | int refcnt; /* refcnt for unbound pools */ | 158 | int refcnt; /* W: refcnt for unbound pools */ |
| 159 | 159 | ||
| 160 | /* | 160 | /* |
| 161 | * The current concurrency level. As it's likely to be accessed | 161 | * The current concurrency level. As it's likely to be accessed |
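For readers scanning the relabeled fields in the hunks above, here is the locking-label legend, paraphrased from the conventions used in kernel/workqueue.c around this time. It is a reader's aid, not text contained in the patch:

```c
/*
 * Synchronization labels (paraphrased; the exact legend lives at the top
 * of kernel/workqueue.c):
 *
 *  I: set during initialization and read-only afterwards.
 *  L: access requires pool->lock.
 *  W: access requires workqueue_lock.
 *  R: modifications require workqueue_lock, reads only need sched-RCU
 *     (the rule spelled out for worker_pool_idr further down this diff).
 */
```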
| @@ -213,8 +213,8 @@ struct wq_flusher { | |||
| 213 | struct wq_device; | 213 | struct wq_device; |
| 214 | 214 | ||
| 215 | /* | 215 | /* |
| 216 | * The externally visible workqueue abstraction is an array of | 216 | * The externally visible workqueue. It relays the issued work items to |
| 217 | * per-CPU workqueues: | 217 | * the appropriate worker_pool through its pool_workqueues. |
| 218 | */ | 218 | */ |
| 219 | struct workqueue_struct { | 219 | struct workqueue_struct { |
| 220 | unsigned int flags; /* W: WQ_* flags */ | 220 | unsigned int flags; /* W: WQ_* flags */ |
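The rewritten comment describes workqueue_struct as the externally visible front end that relays work items to worker_pools via its pool_workqueues. Below is a minimal, hypothetical module-style sketch of that external side, assuming only the public workqueue API; the names demo_wq, demo_fn and demo_work are invented:

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work executed\n");
}

static DECLARE_WORK(demo_work, demo_fn);
static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* An unbound wq is backed by a dynamically created unbound pool. */
	demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;

	/* The wq picks a pool_workqueue and hands the item to its pool. */
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```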
| @@ -247,9 +247,10 @@ struct workqueue_struct { | |||
| 247 | 247 | ||
| 248 | static struct kmem_cache *pwq_cache; | 248 | static struct kmem_cache *pwq_cache; |
| 249 | 249 | ||
| 250 | /* hash of all unbound pools keyed by pool->attrs */ | 250 | /* W: hash of all unbound pools keyed by pool->attrs */ |
| 251 | static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); | 251 | static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); |
| 252 | 252 | ||
| 253 | /* I: attributes used when instantiating standard unbound pools on demand */ | ||
| 253 | static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; | 254 | static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; |
| 254 | 255 | ||
| 255 | struct workqueue_struct *system_wq __read_mostly; | 256 | struct workqueue_struct *system_wq __read_mostly; |
| @@ -434,16 +435,13 @@ static DEFINE_SPINLOCK(workqueue_lock); | |||
| 434 | static LIST_HEAD(workqueues); | 435 | static LIST_HEAD(workqueues); |
| 435 | static bool workqueue_freezing; /* W: have wqs started freezing? */ | 436 | static bool workqueue_freezing; /* W: have wqs started freezing? */ |
| 436 | 437 | ||
| 437 | /* | 438 | /* the per-cpu worker pools */ |
| 438 | * The CPU and unbound standard worker pools. The unbound ones have | ||
| 439 | * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set. | ||
| 440 | */ | ||
| 441 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], | 439 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], |
| 442 | cpu_worker_pools); | 440 | cpu_worker_pools); |
| 443 | 441 | ||
| 444 | /* | 442 | /* |
| 445 | * idr of all pools. Modifications are protected by workqueue_lock. Read | 443 | * R: idr of all pools. Modifications are protected by workqueue_lock. |
| 446 | * accesses are protected by sched-RCU protected. | 444 | * Read accesses are protected by sched-RCU protected. |
| 447 | */ | 445 | */ |
| 448 | static DEFINE_IDR(worker_pool_idr); | 446 | static DEFINE_IDR(worker_pool_idr); |
| 449 | 447 | ||
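The "R:" annotation above makes the rule explicit: worker_pool_idr is modified under workqueue_lock and read under sched-RCU. A sketch of a read-side lookup under that rule, as it might sit inside kernel/workqueue.c (the helper name is invented; idr_find() and the sched-RCU primitives are real):

```c
static void demo_report_pool(int pool_id)
{
	struct worker_pool *pool;

	rcu_read_lock_sched();		/* enough for read access per the R: rule */
	pool = idr_find(&worker_pool_idr, pool_id);
	if (pool)
		pr_info("pool %d exists\n", pool_id);	/* only stable inside the section */
	rcu_read_unlock_sched();
}
```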
| @@ -890,13 +888,12 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) | |||
| 890 | * recycled work item as currently executing and make it wait until the | 888 | * recycled work item as currently executing and make it wait until the |
| 891 | * current execution finishes, introducing an unwanted dependency. | 889 | * current execution finishes, introducing an unwanted dependency. |
| 892 | * | 890 | * |
| 893 | * This function checks the work item address, work function and workqueue | 891 | * This function checks the work item address and work function to avoid |
| 894 | * to avoid false positives. Note that this isn't complete as one may | 892 | * false positives. Note that this isn't complete as one may construct a |
| 895 | * construct a work function which can introduce dependency onto itself | 893 | * work function which can introduce dependency onto itself through a |
| 896 | * through a recycled work item. Well, if somebody wants to shoot oneself | 894 | * recycled work item. Well, if somebody wants to shoot oneself in the |
| 897 | * in the foot that badly, there's only so much we can do, and if such | 895 | * foot that badly, there's only so much we can do, and if such deadlock |
| 898 | * deadlock actually occurs, it should be easy to locate the culprit work | 896 | * actually occurs, it should be easy to locate the culprit work function. |
| 899 | * function. | ||
| 900 | * | 897 | * |
| 901 | * CONTEXT: | 898 | * CONTEXT: |
| 902 | * spin_lock_irq(pool->lock). | 899 | * spin_lock_irq(pool->lock). |
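The reworded paragraph says the lookup keys on both the work item address and its function. Roughly how find_worker_executing_work() of this era implements that check, condensed and paraphrased for illustration (not code touched by this patch):

```c
struct worker *worker;

/* busy_hash is keyed on the work item's address ... */
hash_for_each_possible(pool->busy_hash, worker, hentry,
		       (unsigned long)work)
	/*
	 * ... but a hit only counts if the function matches too, so a
	 * recycled item that now runs a different function is not
	 * mistaken for the one still executing.
	 */
	if (worker->current_work == work &&
	    worker->current_func == work->func)
		return worker;

return NULL;
```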
| @@ -1187,9 +1184,9 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, | |||
| 1187 | get_pwq(pwq); | 1184 | get_pwq(pwq); |
| 1188 | 1185 | ||
| 1189 | /* | 1186 | /* |
| 1190 | * Ensure either worker_sched_deactivated() sees the above | 1187 | * Ensure either wq_worker_sleeping() sees the above |
| 1191 | * list_add_tail() or we see zero nr_running to avoid workers | 1188 | * list_add_tail() or we see zero nr_running to avoid workers lying |
| 1192 | * lying around lazily while there are works to be processed. | 1189 | * around lazily while there are works to be processed. |
| 1193 | */ | 1190 | */ |
| 1194 | smp_mb(); | 1191 | smp_mb(); |
| 1195 | 1192 | ||
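The comment now points at wq_worker_sleeping(), which is the other side of the barrier. A condensed sketch of the pairing the comment describes, simplified from insert_work() and wq_worker_sleeping() of this era with unrelated details omitted:

```c
/* queueing side, insert_work(): publish the work item first */
list_add_tail(&work->entry, head);
smp_mb();				/* order the list add against the nr_running read */
if (__need_more_worker(pool))		/* saw nr_running == 0 ... */
	wake_up_worker(pool);		/* ... so wake an idle worker */

/* sleeping side, wq_worker_sleeping(): drop nr_running, then re-check work */
if (atomic_dec_and_test(&pool->nr_running) &&
    !list_empty(&pool->worklist))	/* either this sees the new item ... */
	to_wakeup = first_worker(pool);	/* ... or the queueing side saw zero */
```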
| @@ -1790,6 +1787,10 @@ static struct worker *create_worker(struct worker_pool *pool) | |||
| 1790 | if (IS_ERR(worker->task)) | 1787 | if (IS_ERR(worker->task)) |
| 1791 | goto fail; | 1788 | goto fail; |
| 1792 | 1789 | ||
| 1790 | /* | ||
| 1791 | * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any | ||
| 1792 | * online CPUs. It'll be re-applied when any of the CPUs come up. | ||
| 1793 | */ | ||
| 1793 | set_user_nice(worker->task, pool->attrs->nice); | 1794 | set_user_nice(worker->task, pool->attrs->nice); |
| 1794 | set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); | 1795 | set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); |
| 1795 | 1796 | ||
| @@ -1950,8 +1951,8 @@ static void pool_mayday_timeout(unsigned long __pool) | |||
| 1950 | * sent to all rescuers with works scheduled on @pool to resolve | 1951 | * sent to all rescuers with works scheduled on @pool to resolve |
| 1951 | * possible allocation deadlock. | 1952 | * possible allocation deadlock. |
| 1952 | * | 1953 | * |
| 1953 | * On return, need_to_create_worker() is guaranteed to be false and | 1954 | * On return, need_to_create_worker() is guaranteed to be %false and |
| 1954 | * may_start_working() true. | 1955 | * may_start_working() %true. |
| 1955 | * | 1956 | * |
| 1956 | * LOCKING: | 1957 | * LOCKING: |
| 1957 | * spin_lock_irq(pool->lock) which may be released and regrabbed | 1958 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
| @@ -1959,7 +1960,7 @@ static void pool_mayday_timeout(unsigned long __pool) | |||
| 1959 | * manager. | 1960 | * manager. |
| 1960 | * | 1961 | * |
| 1961 | * RETURNS: | 1962 | * RETURNS: |
| 1962 | * false if no action was taken and pool->lock stayed locked, true | 1963 | * %false if no action was taken and pool->lock stayed locked, %true |
| 1963 | * otherwise. | 1964 | * otherwise. |
| 1964 | */ | 1965 | */ |
| 1965 | static bool maybe_create_worker(struct worker_pool *pool) | 1966 | static bool maybe_create_worker(struct worker_pool *pool) |
| @@ -2016,7 +2017,7 @@ restart: | |||
| 2016 | * multiple times. Called only from manager. | 2017 | * multiple times. Called only from manager. |
| 2017 | * | 2018 | * |
| 2018 | * RETURNS: | 2019 | * RETURNS: |
| 2019 | * false if no action was taken and pool->lock stayed locked, true | 2020 | * %false if no action was taken and pool->lock stayed locked, %true |
| 2020 | * otherwise. | 2021 | * otherwise. |
| 2021 | */ | 2022 | */ |
| 2022 | static bool maybe_destroy_workers(struct worker_pool *pool) | 2023 | static bool maybe_destroy_workers(struct worker_pool *pool) |
| @@ -2268,11 +2269,11 @@ static void process_scheduled_works(struct worker *worker) | |||
| 2268 | * worker_thread - the worker thread function | 2269 | * worker_thread - the worker thread function |
| 2269 | * @__worker: self | 2270 | * @__worker: self |
| 2270 | * | 2271 | * |
| 2271 | * The worker thread function. There are NR_CPU_WORKER_POOLS dynamic pools | 2272 | * The worker thread function. All workers belong to a worker_pool - |
| 2272 | * of these per each cpu. These workers process all works regardless of | 2273 | * either a per-cpu one or dynamic unbound one. These workers process all |
| 2273 | * their specific target workqueue. The only exception is works which | 2274 | * work items regardless of their specific target workqueue. The only |
| 2274 | * belong to workqueues with a rescuer which will be explained in | 2275 | * exception is work items which belong to workqueues with a rescuer which |
| 2275 | * rescuer_thread(). | 2276 | * will be explained in rescuer_thread(). |
| 2276 | */ | 2277 | */ |
| 2277 | static int worker_thread(void *__worker) | 2278 | static int worker_thread(void *__worker) |
| 2278 | { | 2279 | { |
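The rescuer exception mentioned above only applies to workqueues created with WQ_MEM_RECLAIM. A hypothetical setup sketch (the names are invented; the flag and alloc_workqueue() are the real API):

```c
#include <linux/workqueue.h>

static struct workqueue_struct *demo_reclaim_wq;

static int demo_reclaim_setup(void)
{
	/*
	 * WQ_MEM_RECLAIM attaches a dedicated rescuer thread, so work items
	 * on this wq can make forward progress even when new kworkers
	 * cannot be created under memory pressure.
	 */
	demo_reclaim_wq = alloc_workqueue("demo_reclaim", WQ_MEM_RECLAIM, 0);
	return demo_reclaim_wq ? 0 : -ENOMEM;
}
```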
| @@ -2600,11 +2601,8 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, | |||
| 2600 | * flush_workqueue - ensure that any scheduled work has run to completion. | 2601 | * flush_workqueue - ensure that any scheduled work has run to completion. |
| 2601 | * @wq: workqueue to flush | 2602 | * @wq: workqueue to flush |
| 2602 | * | 2603 | * |
| 2603 | * Forces execution of the workqueue and blocks until its completion. | 2604 | * This function sleeps until all work items which were queued on entry |
| 2604 | * This is typically used in driver shutdown handlers. | 2605 | * have finished execution, but it is not livelocked by new incoming ones. |
| 2605 | * | ||
| 2606 | * We sleep until all works which were queued on entry have been handled, | ||
| 2607 | * but we are not livelocked by new incoming ones. | ||
| 2608 | */ | 2606 | */ |
| 2609 | void flush_workqueue(struct workqueue_struct *wq) | 2607 | void flush_workqueue(struct workqueue_struct *wq) |
| 2610 | { | 2608 | { |
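A hypothetical caller-side sketch of the guarantee the new flush_workqueue() comment states (all names invented; only the workqueue calls are real):

```c
static void demo_flush_fn(struct work_struct *work) { }
static DECLARE_WORK(demo_flush_work, demo_flush_fn);

static void demo_queue_and_wait(struct workqueue_struct *wq)
{
	queue_work(wq, &demo_flush_work);

	/*
	 * Sleeps until every item queued before this call, including
	 * demo_flush_work, has finished.  Items queued afterwards need not
	 * complete, which is why the flush cannot be livelocked.
	 */
	flush_workqueue(wq);
}
```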
| @@ -2794,7 +2792,7 @@ reflush: | |||
| 2794 | 2792 | ||
| 2795 | if (++flush_cnt == 10 || | 2793 | if (++flush_cnt == 10 || |
| 2796 | (flush_cnt % 100 == 0 && flush_cnt <= 1000)) | 2794 | (flush_cnt % 100 == 0 && flush_cnt <= 1000)) |
| 2797 | pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n", | 2795 | pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", |
| 2798 | wq->name, flush_cnt); | 2796 | wq->name, flush_cnt); |
| 2799 | 2797 | ||
| 2800 | local_irq_enable(); | 2798 | local_irq_enable(); |
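The reworded warning reflects that draining is a standalone operation, not just part of destruction. A hypothetical sketch of the two call sites the message now distinguishes (drain_workqueue() and destroy_workqueue() are the real API):

```c
static void demo_quiesce(struct workqueue_struct *wq, bool tear_down)
{
	if (!tear_down) {
		/* Repeatedly flush until empty, but keep the wq usable. */
		drain_workqueue(wq);
		return;
	}

	/* Full teardown; this drains internally before freeing the wq. */
	destroy_workqueue(wq);
}
```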
| @@ -3576,7 +3574,9 @@ static void rcu_free_pool(struct rcu_head *rcu) | |||
| 3576 | * @pool: worker_pool to put | 3574 | * @pool: worker_pool to put |
| 3577 | * | 3575 | * |
| 3578 | * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU | 3576 | * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU |
| 3579 | * safe manner. | 3577 | * safe manner. get_unbound_pool() calls this function on its failure path |
| 3578 | * and this function should be able to release pools which went through, | ||
| 3579 | * successfully or not, init_worker_pool(). | ||
| 3580 | */ | 3580 | */ |
| 3581 | static void put_unbound_pool(struct worker_pool *pool) | 3581 | static void put_unbound_pool(struct worker_pool *pool) |
| 3582 | { | 3582 | { |
| @@ -3602,7 +3602,11 @@ static void put_unbound_pool(struct worker_pool *pool) | |||
| 3602 | 3602 | ||
| 3603 | spin_unlock_irq(&workqueue_lock); | 3603 | spin_unlock_irq(&workqueue_lock); |
| 3604 | 3604 | ||
| 3605 | /* lock out manager and destroy all workers */ | 3605 | /* |
| 3606 | * Become the manager and destroy all workers. Grabbing | ||
| 3607 | * manager_arb prevents @pool's workers from blocking on | ||
| 3608 | * manager_mutex. | ||
| 3609 | */ | ||
| 3606 | mutex_lock(&pool->manager_arb); | 3610 | mutex_lock(&pool->manager_arb); |
| 3607 | spin_lock_irq(&pool->lock); | 3611 | spin_lock_irq(&pool->lock); |
| 3608 | 3612 | ||
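The expanded comment explains why manager_arb is taken: it keeps the pool's workers from winning the manager role while the pool is being torn down. A paraphrased sketch of the surrounding teardown sequence in put_unbound_pool() of this era (an illustration, not lines added by the patch):

```c
mutex_lock(&pool->manager_arb);		/* no worker of @pool can become manager now */
spin_lock_irq(&pool->lock);

while ((worker = first_worker(pool)))	/* the worker list can only shrink here */
	destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);

spin_unlock_irq(&pool->lock);
mutex_unlock(&pool->manager_arb);
```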
| @@ -4339,7 +4343,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
| 4339 | * freeze_workqueues_begin - begin freezing workqueues | 4343 | * freeze_workqueues_begin - begin freezing workqueues |
| 4340 | * | 4344 | * |
| 4341 | * Start freezing workqueues. After this function returns, all freezable | 4345 | * Start freezing workqueues. After this function returns, all freezable |
| 4342 | * workqueues will queue new works to their frozen_works list instead of | 4346 | * workqueues will queue new works to their delayed_works list instead of |
| 4343 | * pool->worklist. | 4347 | * pool->worklist. |
| 4344 | * | 4348 | * |
| 4345 | * CONTEXT: | 4349 | * CONTEXT: |
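Freezing only affects workqueues created with WQ_FREEZABLE. A hypothetical setup sketch tying the comment's delayed_works behaviour to the flag (names invented; the flag and API are real):

```c
static struct workqueue_struct *demo_freezable_wq;

static int demo_freezable_setup(void)
{
	/*
	 * After freeze_workqueues_begin(), items queued on a WQ_FREEZABLE
	 * wq sit on its pool_workqueue's delayed_works list and only reach
	 * pool->worklist again when the workqueues are thawed.
	 */
	demo_freezable_wq = alloc_workqueue("demo_freezable", WQ_FREEZABLE, 0);
	return demo_freezable_wq ? 0 : -ENOMEM;
}
```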
