author		Tejun Heo <tj@kernel.org>	2011-04-05 12:01:44 -0400
committer	Tejun Heo <tj@kernel.org>	2011-05-20 07:54:46 -0400
commit		9c5a2ba70251ecaab18c7a83e38b3c620223476c (patch)
tree		ece2c035b761af1332ea027f32443a6d05b16bc9 /kernel/workqueue.c
parent		2543a87108d2af7d48a43b3d6685c2b1ea279e36 (diff)
workqueue: separate out drain_workqueue() from destroy_workqueue()
There are users which want to drain a workqueue without destroying it.
Separate out the drain functionality from destroy_workqueue() into
drain_workqueue() and make it accessible to workqueue users.

To guarantee forward progress, only chain queueing is allowed while a
drain is in progress. If a new work item which isn't chained from the
running or pending work items is queued while draining is in progress,
WARN_ON_ONCE() is triggered. A short illustrative sketch follows the
tags below.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <James.Bottomley@hansenpartnership.com>
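[Editor's illustration] A minimal sketch of what chain queueing means in practice; the names below (my_wq, first_step(), second_step()) are hypothetical, invented for this example rather than taken from the patch. A work item already running on the draining workqueue may queue follow-up work on that same workqueue; queueing from any other context trips the new warning.

    #include <linux/workqueue.h>

    /* hypothetical example, not part of the patch */
    static struct workqueue_struct *my_wq;

    static void second_step(struct work_struct *work)
    {
            /* last link of the chain; queues nothing further */
    }
    static DECLARE_WORK(second_work, second_step);

    static void first_step(struct work_struct *work)
    {
            /*
             * Chained queueing: this function runs on my_wq itself,
             * so is_chained_work() allows the queueing even while
             * my_wq is being drained.
             */
            queue_work(my_wq, &second_work);
    }
    static DECLARE_WORK(first_work, first_step);

    /*
     * By contrast, calling queue_work(my_wq, ...) from any other
     * context while WQ_DRAINING is set hits the WARN_ON_ONCE() in
     * __queue_work() and the work item is dropped.
     */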
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	81
1 file changed, 53 insertions(+), 28 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e3378e8d3a5c..25c8afeaeae8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -221,7 +221,7 @@ typedef unsigned long mayday_mask_t;
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-	unsigned int		flags;		/* I: WQ_* flags */
+	unsigned int		flags;		/* W: WQ_* flags */
 	union {
 		struct cpu_workqueue_struct __percpu *pcpu;
 		struct cpu_workqueue_struct	*single;
@@ -240,6 +240,7 @@ struct workqueue_struct {
 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
+	int			nr_drainers;	/* W: drain in progress */
 	int			saved_max_active; /* W: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
@@ -990,7 +991,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	debug_work_activate(work);
 
 	/* if dying, only works from the same workqueue are allowed */
-	if (unlikely(wq->flags & WQ_DYING) &&
+	if (unlikely(wq->flags & WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
@@ -2381,6 +2382,54 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty.  While draining is in progress,
+ * only chain queueing is allowed.  IOW, only currently pending or running
+ * work items on @wq can queue further work items on it.  @wq is flushed
+ * repeatedly until it becomes empty.  The number of flushes is determined
+ * by the depth of chaining and should be relatively short.  Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+	unsigned int flush_cnt = 0;
+	unsigned int cpu;
+
+	/*
+	 * __queue_work() needs to test whether there are drainers; it is much
+	 * hotter than drain_workqueue() and already looks at @wq->flags.
+	 * Use WQ_DRAINING so that queueing doesn't have to check nr_drainers.
+	 */
+	spin_lock(&workqueue_lock);
+	if (!wq->nr_drainers++)
+		wq->flags |= WQ_DRAINING;
+	spin_unlock(&workqueue_lock);
+reflush:
+	flush_workqueue(wq);
+
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+				   wq->name, flush_cnt);
+		goto reflush;
+	}
+
+	spin_lock(&workqueue_lock);
+	if (!--wq->nr_drainers)
+		wq->flags &= ~WQ_DRAINING;
+	spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 			     bool wait_executing)
 {
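[Editor's illustration] At a caller site the split looks as follows. This is a hedged usage sketch: struct my_device, its wq and refresh_work members, and the quiesce/restart functions are hypothetical stand-ins for a real subsystem, not code from the patch.

    #include <linux/workqueue.h>

    struct my_device {
            struct workqueue_struct *wq;
            struct work_struct refresh_work;
    };

    static void my_device_quiesce(struct my_device *dev)
    {
            /*
             * Wait until every pending work item, and anything those
             * items chain-queue, has finished.  Unlike
             * destroy_workqueue(), dev->wq stays fully usable.
             */
            drain_workqueue(dev->wq);
    }

    static void my_device_restart(struct my_device *dev)
    {
            /* the drained workqueue can be reused immediately */
            queue_work(dev->wq, &dev->refresh_work);
    }

Note the design choice visible in the hunk above: nr_drainers makes draining safe for concurrent drainers (destroy_workqueue() may call drain_workqueue() while another caller is already draining), while the hot __queue_work() path only tests the cached WQ_DRAINING bit in wq->flags, which it reads anyway.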
@@ -3011,34 +3060,10 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
-	/*
-	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
-	 * set, only chain queueing is allowed.  IOW, only currently
-	 * pending or running work items on @wq can queue further work
-	 * items on it.  @wq is flushed repeatedly until it becomes empty.
-	 * The number of flushing is detemined by the depth of chaining and
-	 * should be relatively short.  Whine if it takes too long.
-	 */
-	wq->flags |= WQ_DYING;
-reflush:
-	flush_workqueue(wq);
-
-	for_each_cwq_cpu(cpu, wq) {
-		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
-			continue;
-
-		if (++flush_cnt == 10 ||
-		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-			printk(KERN_WARNING "workqueue %s: flush on "
-			       "destruction isn't complete after %u tries\n",
-			       wq->name, flush_cnt);
-		goto reflush;
-	}
+	/* drain it before proceeding with destruction */
+	drain_workqueue(wq);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
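[Editor's illustration] With the drain logic factored out, destroy_workqueue() reduces to drain-then-teardown and existing callers need no changes. A minimal sketch of that unchanged caller pattern, reusing the hypothetical my_wq from the earlier example:

    #include <linux/module.h>

    static void __exit my_module_exit(void)
    {
            /*
             * destroy_workqueue() now drains via drain_workqueue()
             * before freeing, so every pending and chained work item
             * has completed by the time the workqueue is released.
             */
            destroy_workqueue(my_wq);
    }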