 Documentation/feature-removal-schedule.txt | 10
 include/linux/workqueue.h                  | 18
 kernel/workqueue.c                         | 81
 3 files changed, 55 insertions(+), 54 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b1c921c27519..d59e71df5c5c 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -501,16 +501,6 @@ Who: NeilBrown <neilb@suse.de>
 
 ----------------------------
 
-What:	cancel_rearming_delayed_work[queue]()
-When:	2.6.39
-
-Why:	The functions have been superceded by cancel_delayed_work_sync()
-	quite some time ago.  The conversion is trivial and there is no
-	in-kernel user left.
-Who:	Tejun Heo <tj@kernel.org>
-
-----------------------------
-
 What:	Legacy, non-standard chassis intrusion detection interface.
 When:	June 2011
 Why:	The adm9240, w83792d and w83793 hardware monitoring drivers have
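
The "trivial" conversion the removed schedule entry refers to is a direct substitution; a minimal before/after sketch (my_wq and my_dwork are invented names, not from the patch):

	/* before: the deprecated wrappers */
	cancel_rearming_delayed_workqueue(my_wq, &my_dwork);
	cancel_rearming_delayed_work(&my_dwork);

	/* after: one call replaces both; it cancels the delayed work and
	 * waits for any running instance to finish, even if it rearms */
	cancel_delayed_work_sync(&my_dwork);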
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f584aba78ca9..2be2887c6958 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -255,7 +255,7 @@ enum {
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
-	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
 	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
@@ -355,6 +355,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
+extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
 extern int schedule_work(struct work_struct *work);
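
Exporting drain_workqueue() separately from destruction means a workqueue can be quiesced and then reused, which flush_workqueue() alone cannot guarantee when work items requeue follow-ups. A hedged usage sketch; the ex_dev structure and ex_hw_reset() are invented for illustration:

	static void ex_reset(struct ex_dev *dev)
	{
		/*
		 * Unlike flush_workqueue(), this also waits out any
		 * follow-up works that running works chain onto the
		 * same queue.
		 */
		drain_workqueue(dev->wq);

		ex_hw_reset(dev);	/* hypothetical: queue is now idle */

		/* dev->wq is still alive and can be queued on again */
	}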
@@ -412,21 +413,6 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-/* Obsolete. use cancel_delayed_work_sync() */
-static inline __deprecated
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-					struct delayed_work *work)
-{
-	cancel_delayed_work_sync(work);
-}
-
-/* Obsolete. use cancel_delayed_work_sync() */
-static inline __deprecated
-void cancel_rearming_delayed_work(struct delayed_work *work)
-{
-	cancel_delayed_work_sync(work);
-}
-
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d04..25fb1b0e53fa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -221,7 +221,7 @@ typedef unsigned long mayday_mask_t;
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-	unsigned int		flags;		/* I: WQ_* flags */
+	unsigned int		flags;		/* W: WQ_* flags */
 	union {
 		struct cpu_workqueue_struct __percpu	*pcpu;
 		struct cpu_workqueue_struct		*single;
@@ -240,6 +240,7 @@ struct workqueue_struct {
 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
+	int			nr_drainers;	/* W: drain in progress */
 	int			saved_max_active; /* W: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
@@ -990,7 +991,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	debug_work_activate(work);
 
 	/* if dying, only works from the same workqueue are allowed */
-	if (unlikely(wq->flags & WQ_DYING) &&
+	if (unlikely(wq->flags & WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
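
This check is what makes the drain cooperative rather than absolute: while WQ_DRAINING is set, a queue attempt passes only when is_chained_work(wq) sees the caller is itself a worker executing on @wq; from any other context the WARN_ON_ONCE() fires and the work is dropped. A minimal sketch of the permitted chained pattern, with ex_wq and both work items invented for illustration:

	#include <linux/workqueue.h>

	static struct workqueue_struct *ex_wq;	/* illustrative */

	static void ex_second_fn(struct work_struct *work)
	{
		/* final link of the chain: queues nothing further */
	}
	static DECLARE_WORK(ex_second_work, ex_second_fn);

	static void ex_first_fn(struct work_struct *work)
	{
		/*
		 * Running on ex_wq, so this queueing is "chained work"
		 * and stays legal while ex_wq is draining; the drain
		 * simply flushes one more round to catch it.
		 */
		queue_work(ex_wq, &ex_second_work);
	}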
@@ -2381,6 +2382,54 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty.  While draining is in progress,
+ * only chain queueing is allowed.  IOW, only currently pending or running
+ * work items on @wq can queue further work items on it.  @wq is flushed
+ * repeatedly until it becomes empty.  The number of flushing is detemined
+ * by the depth of chaining and should be relatively short.  Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+	unsigned int flush_cnt = 0;
+	unsigned int cpu;
+
+	/*
+	 * __queue_work() needs to test whether there are drainers, is much
+	 * hotter than drain_workqueue() and already looks at @wq->flags.
+	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
+	 */
+	spin_lock(&workqueue_lock);
+	if (!wq->nr_drainers++)
+		wq->flags |= WQ_DRAINING;
+	spin_unlock(&workqueue_lock);
+reflush:
+	flush_workqueue(wq);
+
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+				   wq->name, flush_cnt);
+		goto reflush;
+	}
+
+	spin_lock(&workqueue_lock);
+	if (!--wq->nr_drainers)
+		wq->flags &= ~WQ_DRAINING;
+	spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 			     bool wait_executing)
 {
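
The locking comment inside drain_workqueue() names a pattern worth calling out: the authoritative state is the nr_drainers count under workqueue_lock, but its boolean projection is mirrored into wq->flags because __queue_work() already loads that word on every queue operation, so the hot path pays for no extra field. A distilled, generic sketch of the idea; all names (ex_q, EX_DRAINING, ex_drain_begin/end) are invented for illustration, not kernel API:

	#include <linux/spinlock.h>

	#define EX_DRAINING	0x1		/* invented flag bit */

	struct ex_q {
		spinlock_t	lock;
		unsigned int	flags;		/* hot path already reads this word */
		int		nr_drainers;	/* authoritative, lock-protected */
	};

	/* slow path: first drainer in sets the flag, last one out clears it */
	static void ex_drain_begin(struct ex_q *q)
	{
		spin_lock(&q->lock);
		if (!q->nr_drainers++)		/* 0 -> 1 transition */
			q->flags |= EX_DRAINING;
		spin_unlock(&q->lock);
	}

	static void ex_drain_end(struct ex_q *q)
	{
		spin_lock(&q->lock);
		if (!--q->nr_drainers)		/* 1 -> 0 transition */
			q->flags &= ~EX_DRAINING;
		spin_unlock(&q->lock);
	}

Because the count is reference-counted rather than a bare flag, nested or concurrent drains compose: WQ_DRAINING stays set until the last drainer leaves.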
@@ -3009,34 +3058,10 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
-	/*
-	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
-	 * set, only chain queueing is allowed.  IOW, only currently
-	 * pending or running work items on @wq can queue further work
-	 * items on it.  @wq is flushed repeatedly until it becomes empty.
-	 * The number of flushing is detemined by the depth of chaining and
-	 * should be relatively short.  Whine if it takes too long.
-	 */
-	wq->flags |= WQ_DYING;
-reflush:
-	flush_workqueue(wq);
-
-	for_each_cwq_cpu(cpu, wq) {
-		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
-			continue;
-
-		if (++flush_cnt == 10 ||
-		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-			printk(KERN_WARNING "workqueue %s: flush on "
-			       "destruction isn't complete after %u tries\n",
-			       wq->name, flush_cnt);
-		goto reflush;
-	}
+	/* drain it before proceeding with destruction */
+	drain_workqueue(wq);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
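
With the drain folded into destroy_workqueue(), callers no longer need any manual flush-before-destroy dance, but one obligation remains: a work item that rearms itself without bound must be stopped first, since chain queueing is legal during the drain and would keep the flush loop spinning past its warning thresholds. A teardown sketch with invented names (ex_dev, poll_dwork):

	static void ex_teardown(struct ex_dev *dev)
	{
		/* stop the self-rearming poller so the drain can terminate */
		cancel_delayed_work_sync(&dev->poll_dwork);

		/* drains internally (chained works included), then tears down */
		destroy_workqueue(dev->wq);
	}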