Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 390
1 file changed, 249 insertions(+), 141 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8bd600c020e5..30acdb74cc23 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
 /*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
  *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002		Ingo Molnar
  *
- * Started by Ingo Molnar, Copyright (C) 2002
+ *   Derived from the taskqueue/keventd code by:
+ *     David Woodhouse <dwmw2@infradead.org>
+ *     Andrew Morton
+ *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *     Theodore Ts'o <tytso@mit.edu>
  *
- * Derived from the taskqueue/keventd code by:
+ * Made to use alloc_percpu by Christoph Lameter.
  *
- *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton
- *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
- *   Theodore Ts'o <tytso@mit.edu>
+ * Copyright (C) 2010		SUSE Linux Products GmbH
+ * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  *
- * Made to use alloc_percpu by Christoph Lameter.
+ * This is the generic async execution mechanism. Work items as are
+ * executed in process context. The worker pool is shared and
+ * automatically managed. There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
  */
 
 #include <linux/module.h>
@@ -35,9 +42,6 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/workqueue.h>
-
 #include "workqueue_sched.h"
 
 enum {
@@ -90,7 +94,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -198,7 +203,7 @@ typedef cpumask_var_t mayday_mask_t;
 	cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)	alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)		free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -249,6 +254,9 @@ EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #define for_each_busy_worker(worker, i, pos, gcwq)			\
 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
@@ -302,21 +310,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 	     (cpu) < WORK_CPU_NONE;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 
-#ifdef CONFIG_LOCKDEP
-/**
- * in_workqueue_context() - in context of specified workqueue?
- * @wq: the workqueue of interest
- *
- * Checks lockdep state to see if the current task is executing from
- * within a workqueue item. This function exists only if lockdep is
- * enabled.
- */
-int in_workqueue_context(struct workqueue_struct *wq)
-{
-	return lock_is_held(&wq->lockdep_map);
-}
-#endif
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
 static struct debug_obj_descr work_debug_descr;
@@ -596,7 +589,9 @@ static bool keep_working(struct global_cwq *gcwq)
 {
 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 
-	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
+	return !list_empty(&gcwq->worklist) &&
+		(atomic_read(nr_running) <= 1 ||
+		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
 }
 
 /* Do we need a new worker? Called from manager. */
@@ -943,10 +938,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
+	unsigned int work_flags;
 	unsigned long flags;
 
 	debug_work_activate(work);
 
+	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+		return;
+
 	/* determine gcwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
@@ -985,18 +984,23 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	/* gcwq determined, get cwq and queue */
 	cwq = get_cwq(gcwq->cpu, wq);
+	trace_workqueue_queue_work(cpu, cwq, work);
 
 	BUG_ON(!list_empty(&work->entry));
 
 	cwq->nr_in_flight[cwq->work_color]++;
+	work_flags = work_color_to_flags(cwq->work_color);
 
 	if (likely(cwq->nr_active < cwq->max_active)) {
+		trace_workqueue_activate_work(work);
 		cwq->nr_active++;
 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
-	} else
+	} else {
+		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
+	}
 
-	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+	insert_work(cwq, work, worklist, work_flags);
 
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
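
For illustration only -- a minimal sketch with made-up names (log_wq, flush_log, flush_a, flush_b), not part of the patch -- this is how the max_active accounting touched above surfaces to a caller: once nr_active reaches max_active, further items sit on cwq->delayed_works with WORK_STRUCT_DELAYED set until an active item completes.

	#include <linux/workqueue.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	static void flush_log(struct work_struct *work)
	{
		/* write buffered log data out */
	}

	static DECLARE_WORK(flush_a, flush_log);
	static DECLARE_WORK(flush_b, flush_log);

	static int __init example_init(void)
	{
		struct workqueue_struct *log_wq;

		log_wq = alloc_workqueue("log_wq", 0, 1);	/* max_active == 1 */
		if (!log_wq)
			return -ENOMEM;

		queue_work(log_wq, &flush_a);	/* counted in nr_active, runs now */
		queue_work(log_wq, &flush_b);	/* parked on delayed_works until flush_a ends */
		return 0;
	}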
@@ -1215,6 +1219,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
 	struct global_cwq *gcwq = worker->gcwq;
 	struct task_struct *task = worker->task;
@@ -1488,6 +1493,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	if (!need_to_create_worker(gcwq))
 		return false;
@@ -1661,7 +1668,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 						    struct work_struct, entry);
 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
+	trace_workqueue_activate_work(work);
 	move_linked_works(work, pos, NULL);
+	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
 	cwq->nr_active++;
 }
 
@@ -1669,6 +1678,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1686,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+				 bool delayed)
 {
 	/* ignore uncolored works */
 	if (color == WORK_NO_COLOR)
 		return;
 
 	cwq->nr_in_flight[color]--;
-	cwq->nr_active--;
 
-	if (!list_empty(&cwq->delayed_works)) {
-		/* one down, submit a delayed one */
-		if (cwq->nr_active < cwq->max_active)
-			cwq_activate_first_delayed(cwq);
+	if (!delayed) {
+		cwq->nr_active--;
+		if (!list_empty(&cwq->delayed_works)) {
+			/* one down, submit a delayed one */
+			if (cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+		}
 	}
 
 	/* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1738,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1838,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
-	cwq_dec_nr_in_flight(cwq, work_color);
+	cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2301,27 +2316,17 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
- *
- * Returns false if @work has already terminated.
- *
- * It is expected that, prior to calling flush_work(), the caller has
- * arranged for the work to not be requeued, otherwise it doesn't make
- * sense to use this function.
- */
-int flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
-	struct wq_barrier barr;
 
 	might_sleep();
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return 0;
+		return false;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -2334,28 +2339,127 @@ int flush_work(struct work_struct *work)
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->gcwq))
 			goto already_gone;
-	} else {
+	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	}
+	} else
+		goto already_gone;
 
-	insert_wq_barrier(cwq, &barr, work, worker);
+	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
-
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
-	return 1;
+	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
-	return 0;
+	return false;
+}
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
+{
+	struct wq_barrier barr;
+	struct worker *worker;
+
+	spin_lock_irq(&gcwq->lock);
+
+	worker = find_worker_executing_work(gcwq, work);
+	if (unlikely(worker))
+		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
+
+	spin_unlock_irq(&gcwq->lock);
+
+	if (unlikely(worker)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
+}
+
+static bool wait_on_work(struct work_struct *work)
+{
+	bool ret = false;
+	int cpu;
+
+	might_sleep();
+
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
+
+	for_each_gcwq_cpu(cpu)
+		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
+	return ret;
+}
+
+/**
+ * flush_work_sync - wait until a work has finished execution
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. On return, it's
+ * guaranteed that all queueing instances of @work which happened
+ * before this function is called are finished. In other words, if
+ * @work hasn't been requeued since this function was called, @work is
+ * guaranteed to be idle on return.
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work_sync(struct work_struct *work)
+{
+	struct wq_barrier barr;
+	bool pending, waited;
+
+	/* we'll wait for executions separately, queue barr only if pending */
+	pending = start_flush_work(work, &barr, false);
+
+	/* wait for executions to finish */
+	waited = wait_on_work(work);
+
+	/* wait for the pending one */
+	if (pending) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
+
+	return pending || waited;
+}
+EXPORT_SYMBOL_GPL(flush_work_sync);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
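
As a rough usage sketch of the distinction the new kernel-doc above draws (my_work and my_handler are made-up names; flush_work() and flush_work_sync() are the interfaces added by this hunk):

	#include <linux/workqueue.h>

	static void my_handler(struct work_struct *work)
	{
		/* ... */
	}
	static DECLARE_WORK(my_work, my_handler);

	static void example(void)
	{
		schedule_work(&my_work);

		/* Waits only for the last queueing instance of my_work. */
		flush_work(&my_work);

		/*
		 * Also waits for instances queued earlier which may still be
		 * running on other CPUs.
		 */
		flush_work_sync(&my_work);
	}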
@@ -2388,7 +2492,8 @@ static int try_to_grab_pending(struct work_struct *work)
 			debug_work_deactivate(work);
 			list_del_init(&work->entry);
 			cwq_dec_nr_in_flight(get_work_cwq(work),
-					     get_work_color(work));
+				get_work_color(work),
+				*work_data_bits(work) & WORK_STRUCT_DELAYED);
 			ret = 1;
 		}
 	}
@@ -2397,39 +2502,7 @@ static int try_to_grab_pending(struct work_struct *work)
 	return ret;
 }
 
-static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-	struct wq_barrier barr;
-	struct worker *worker;
-
-	spin_lock_irq(&gcwq->lock);
-
-	worker = find_worker_executing_work(gcwq, work);
-	if (unlikely(worker))
-		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-	spin_unlock_irq(&gcwq->lock);
-
-	if (unlikely(worker)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-	}
-}
-
-static void wait_on_work(struct work_struct *work)
-{
-	int cpu;
-
-	might_sleep();
-
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
-	for_each_gcwq_cpu(cpu)
-		wait_on_cpu_work(get_gcwq(cpu), work);
-}
-
-static int __cancel_work_timer(struct work_struct *work,
+static bool __cancel_work_timer(struct work_struct *work,
 				struct timer_list* timer)
 {
 	int ret;
@@ -2446,42 +2519,81 @@ static int __cancel_work_timer(struct work_struct *work,
 }
 
 /**
- * cancel_work_sync - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
+ * cancel_work_sync - cancel a work and wait for it to finish
+ * @work: the work to cancel
  *
- * Returns true if @work was pending.
- *
- * cancel_work_sync() will cancel the work if it is queued. If the work's
- * callback appears to be running, cancel_work_sync() will block until it
- * has completed.
- *
- * It is possible to use this function if the work re-queues itself. It can
- * cancel the work even if it migrates to another workqueue, however in that
- * case it only guarantees that work->func() has completed on the last queued
- * workqueue.
+ * Cancel @work and wait for its execution to finish. This function
+ * can be used even if the work re-queues itself or migrates to
+ * another workqueue. On return from this function, @work is
+ * guaranteed to be not pending or executing on any CPU.
  *
- * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
- * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ * cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's. Use cancel_delayed_work_sync() instead.
  *
- * The caller must ensure that workqueue_struct on which this work was last
+ * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
+ *
+ * RETURNS:
+ * %true if @work was pending, %false otherwise.
  */
-int cancel_work_sync(struct work_struct *work)
+bool cancel_work_sync(struct work_struct *work)
 {
 	return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 /**
- * cancel_delayed_work_sync - reliably kill off a delayed work.
- * @dwork: the delayed work struct
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution. Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer))
+		__queue_work(raw_smp_processor_id(),
+			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+	return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * flush_delayed_work_sync - wait for a dwork to finish
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * execution immediately. Other than timer handling, its behavior
+ * is identical to flush_work_sync().
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work_sync(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer))
+		__queue_work(raw_smp_processor_id(),
+			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+	return flush_work_sync(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work_sync);
+
+/**
+ * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
+ * @dwork: the delayed work cancel
  *
- * Returns true if @dwork was pending.
+ * This is cancel_work_sync() for delayed works.
  *
- * It is possible to use this function if @dwork rearms itself via queue_work()
- * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ * RETURNS:
+ * %true if @dwork was pending, %false otherwise.
  */
-int cancel_delayed_work_sync(struct delayed_work *dwork)
+bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
 	return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
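
A minimal sketch of how the delayed-work helpers above fit together, with made-up names (poll_fn, poll_dwork); only the functions shown in this hunk plus the long-standing schedule_delayed_work() are assumed:

	#include <linux/workqueue.h>

	static void poll_fn(struct work_struct *work)
	{
		/* ... */
	}
	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

	static void example(void)
	{
		schedule_delayed_work(&poll_dwork, HZ);

		/* Fire the pending timer now and wait for poll_fn() to finish. */
		flush_delayed_work(&poll_dwork);

		/* Or cancel both the timer and the work and wait for it to stop. */
		cancel_delayed_work_sync(&poll_dwork);
	}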
@@ -2533,23 +2645,6 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
- * flush_delayed_work - block until a dwork_struct's callback has terminated
- * @dwork: the delayed work which is to be flushed
- *
- * Any timeout is cancelled, and any pending work is run immediately.
- */
-void flush_delayed_work(struct delayed_work *dwork)
-{
-	if (del_timer_sync(&dwork->timer)) {
-		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
-			     &dwork->work);
-		put_cpu();
-	}
-	flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
-/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
@@ -2566,13 +2661,15 @@ int schedule_delayed_work_on(int cpu,
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
- * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
  *
- * Returns zero on success.
- * Returns -ve errno on failure.
- *
+ * schedule_on_each_cpu() executes @func on each online CPU using the
+ * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
 {
@@ -2738,6 +2835,13 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	unsigned int cpu;
 
 	/*
+	 * Workqueues which may be used during memory reclaim should
+	 * have a rescuer to guarantee forward progress.
+	 */
+	if (flags & WQ_MEM_RECLAIM)
+		flags |= WQ_RESCUER;
+
+	/*
 	 * Unbound workqueues aren't concurrency managed and should be
 	 * dispatched to workers immediately.
 	 */
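
A rough sketch of what a caller relying on the new WQ_MEM_RECLAIM handling might look like; my_io_wq is a made-up name, and only alloc_workqueue()/destroy_workqueue() from this file are assumed:

	#include <linux/workqueue.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	static struct workqueue_struct *my_io_wq;

	static int __init example_init(void)
	{
		/*
		 * WQ_MEM_RECLAIM now implies WQ_RESCUER, so a rescuer thread
		 * is created and the queue can make progress under reclaim.
		 */
		my_io_wq = alloc_workqueue("my_io_wq", WQ_MEM_RECLAIM, 1);
		if (!my_io_wq)
			return -ENOMEM;
		return 0;
	}

	static void example_exit(void)
	{
		destroy_workqueue(my_io_wq);	/* also stops and frees the rescuer */
	}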
@@ -2791,7 +2895,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 		if (IS_ERR(rescuer->task))
 			goto err;
 
-		wq->rescuer = rescuer;
 		rescuer->task->flags |= PF_THREAD_BOUND;
 		wake_up_process(rescuer->task);
 	}
@@ -2833,6 +2936,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	unsigned int cpu;
 
+	wq->flags |= WQ_DYING;
 	flush_workqueue(wq);
 
 	/*
@@ -2857,6 +2961,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	if (wq->flags & WQ_RESCUER) {
 		kthread_stop(wq->rescuer->task);
 		free_mayday_mask(wq->mayday_mask);
+		kfree(wq->rescuer);
 	}
 
 	free_cwqs(wq);
@@ -3239,6 +3344,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
 * multiple times. To be used by cpu_callback.
 */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	if (!(gcwq->trustee_state == state ||
 	      gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3652,7 @@ static int __init init_workqueues(void)
 		spin_lock_init(&gcwq->lock);
 		INIT_LIST_HEAD(&gcwq->worklist);
 		gcwq->cpu = cpu;
-		if (cpu == WORK_CPU_UNBOUND)
-			gcwq->flags |= GCWQ_DISASSOCIATED;
+		gcwq->flags |= GCWQ_DISASSOCIATED;
 
 		INIT_LIST_HEAD(&gcwq->idle_list);
 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3676,8 @@ static int __init init_workqueues(void)
 		struct global_cwq *gcwq = get_gcwq(cpu);
 		struct worker *worker;
 
+		if (cpu != WORK_CPU_UNBOUND)
+			gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		worker = create_worker(gcwq, true);
 		BUG_ON(!worker);
 		spin_lock_irq(&gcwq->lock);