Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  3182
1 file changed, 2778 insertions(+), 404 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 327d2deb4451..8bd600c020e5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,41 +33,290 @@
33#include <linux/kallsyms.h> 33#include <linux/kallsyms.h>
34#include <linux/debug_locks.h> 34#include <linux/debug_locks.h>
35#include <linux/lockdep.h> 35#include <linux/lockdep.h>
36#include <linux/idr.h>
37
36#define CREATE_TRACE_POINTS 38#define CREATE_TRACE_POINTS
37#include <trace/events/workqueue.h> 39#include <trace/events/workqueue.h>
38 40
41#include "workqueue_sched.h"
42
43enum {
44 /* global_cwq flags */
45 GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
46 GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */
47 GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
48 GCWQ_FREEZING = 1 << 3, /* freeze in progress */
49 GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */
50
51 /* worker flags */
52 WORKER_STARTED = 1 << 0, /* started */
53 WORKER_DIE = 1 << 1, /* die die die */
54 WORKER_IDLE = 1 << 2, /* is idle */
55 WORKER_PREP = 1 << 3, /* preparing to run works */
56 WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
57 WORKER_REBIND = 1 << 5, /* mom is home, come back */
58 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
59 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
60
61 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
62 WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
63
64 /* gcwq->trustee_state */
65 TRUSTEE_START = 0, /* start */
66 TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
67 TRUSTEE_BUTCHER = 2, /* butcher workers */
68 TRUSTEE_RELEASE = 3, /* release workers */
69 TRUSTEE_DONE = 4, /* trustee is done */
70
71 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
72 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
73 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
74
75 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
76 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
77
78 MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */
79 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
 80 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
81 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
82
83 /*
 84 * Rescue workers are used only in emergencies and shared by
85 * all cpus. Give -20.
86 */
87 RESCUER_NICE_LEVEL = -20,
88};
89
39/* 90/*
40 * The per-CPU workqueue (if single thread, we always use the first 91 * Structure fields follow one of the following exclusion rules.
41 * possible cpu). 92 *
93 * I: Set during initialization and read-only afterwards.
94 *
95 * P: Preemption protected. Disabling preemption is enough and should
96 * only be modified and accessed from the local cpu.
97 *
98 * L: gcwq->lock protected. Access with gcwq->lock held.
99 *
100 * X: During normal operation, modification requires gcwq->lock and
101 * should be done only from local cpu. Either disabling preemption
102 * on local cpu or grabbing gcwq->lock is enough for read access.
103 * If GCWQ_DISASSOCIATED is set, it's identical to L.
104 *
105 * F: wq->flush_mutex protected.
106 *
107 * W: workqueue_lock protected.
42 */ 108 */
43struct cpu_workqueue_struct {
44 109
45 spinlock_t lock; 110struct global_cwq;
111
112/*
113 * The poor guys doing the actual heavy lifting. All on-duty workers
114 * are either serving the manager role, on idle list or on busy hash.
115 */
116struct worker {
117 /* on idle list while idle, on busy hash table while busy */
118 union {
119 struct list_head entry; /* L: while idle */
120 struct hlist_node hentry; /* L: while busy */
121 };
122
123 struct work_struct *current_work; /* L: work being processed */
124 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
125 struct list_head scheduled; /* L: scheduled works */
126 struct task_struct *task; /* I: worker task */
127 struct global_cwq *gcwq; /* I: the associated gcwq */
128 /* 64 bytes boundary on 64bit, 32 on 32bit */
129 unsigned long last_active; /* L: last active timestamp */
130 unsigned int flags; /* X: flags */
131 int id; /* I: worker id */
132 struct work_struct rebind_work; /* L: rebind worker to cpu */
133};
134
135/*
136 * Global per-cpu workqueue. There's one and only one for each cpu
137 * and all works are queued and processed here regardless of their
138 * target workqueues.
139 */
140struct global_cwq {
141 spinlock_t lock; /* the gcwq lock */
142 struct list_head worklist; /* L: list of pending works */
143 unsigned int cpu; /* I: the associated cpu */
144 unsigned int flags; /* L: GCWQ_* flags */
46 145
47 struct list_head worklist; 146 int nr_workers; /* L: total number of workers */
48 wait_queue_head_t more_work; 147 int nr_idle; /* L: currently idle ones */
49 struct work_struct *current_work;
50 148
51 struct workqueue_struct *wq; 149 /* workers are chained either in the idle_list or busy_hash */
52 struct task_struct *thread; 150 struct list_head idle_list; /* X: list of idle workers */
53} ____cacheline_aligned; 151 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
152 /* L: hash of busy workers */
153
154 struct timer_list idle_timer; /* L: worker idle timeout */
 155 struct timer_list mayday_timer; /* L: SOS timer for workers */
156
157 struct ida worker_ida; /* L: for worker IDs */
158
159 struct task_struct *trustee; /* L: for gcwq shutdown */
160 unsigned int trustee_state; /* L: trustee state */
161 wait_queue_head_t trustee_wait; /* trustee wait */
162 struct worker *first_idle; /* L: first idle worker */
163} ____cacheline_aligned_in_smp;
164
165/*
166 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
167 * work_struct->data are used for flags and thus cwqs need to be
168 * aligned at two's power of the number of flag bits.
169 */
170struct cpu_workqueue_struct {
171 struct global_cwq *gcwq; /* I: the associated gcwq */
172 struct workqueue_struct *wq; /* I: the owning workqueue */
173 int work_color; /* L: current color */
174 int flush_color; /* L: flushing color */
175 int nr_in_flight[WORK_NR_COLORS];
176 /* L: nr of in_flight works */
177 int nr_active; /* L: nr of active works */
178 int max_active; /* L: max active works */
179 struct list_head delayed_works; /* L: delayed works */
180};
181
182/*
183 * Structure used to wait for workqueue flush.
184 */
185struct wq_flusher {
186 struct list_head list; /* F: list of flushers */
187 int flush_color; /* F: flush color waiting for */
188 struct completion done; /* flush completion */
189};
190
191/*
192 * All cpumasks are assumed to be always set on UP and thus can't be
193 * used to determine whether there's something to be done.
194 */
195#ifdef CONFIG_SMP
196typedef cpumask_var_t mayday_mask_t;
197#define mayday_test_and_set_cpu(cpu, mask) \
198 cpumask_test_and_set_cpu((cpu), (mask))
199#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
200#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
201#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp))
202#define free_mayday_mask(mask) free_cpumask_var((mask))
203#else
204typedef unsigned long mayday_mask_t;
205#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
206#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
207#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
208#define alloc_mayday_mask(maskp, gfp) true
209#define free_mayday_mask(mask) do { } while (0)
210#endif
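As an illustration only (not part of the patch): on UP the mayday mask degenerates to a single unsigned long, so the "iterator" visits cpu 0 at most once. A minimal sketch of a rescuer-style caller, with example_scan_mayday() being a made-up name:

	/* sketch only: example_scan_mayday() is hypothetical */
	static void example_scan_mayday(struct workqueue_struct *wq)
	{
		unsigned int cpu;

		for_each_mayday_cpu(cpu, wq->mayday_mask) {
			/* on UP this body runs once with cpu == 0 iff the mask is non-zero */
			mayday_clear_cpu(cpu, wq->mayday_mask);
			/* ... look up get_cwq(cpu, wq) and service the distress call ... */
		}
	}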
54 211
55/* 212/*
56 * The externally visible workqueue abstraction is an array of 213 * The externally visible workqueue abstraction is an array of
57 * per-CPU workqueues: 214 * per-CPU workqueues:
58 */ 215 */
59struct workqueue_struct { 216struct workqueue_struct {
60 struct cpu_workqueue_struct *cpu_wq; 217 unsigned int flags; /* I: WQ_* flags */
61 struct list_head list; 218 union {
62 const char *name; 219 struct cpu_workqueue_struct __percpu *pcpu;
63 int singlethread; 220 struct cpu_workqueue_struct *single;
64 int freezeable; /* Freeze threads during suspend */ 221 unsigned long v;
65 int rt; 222 } cpu_wq; /* I: cwq's */
223 struct list_head list; /* W: list of all workqueues */
224
225 struct mutex flush_mutex; /* protects wq flushing */
226 int work_color; /* F: current work color */
227 int flush_color; /* F: current flush color */
228 atomic_t nr_cwqs_to_flush; /* flush in progress */
229 struct wq_flusher *first_flusher; /* F: first flusher */
230 struct list_head flusher_queue; /* F: flush waiters */
231 struct list_head flusher_overflow; /* F: flush overflow list */
232
233 mayday_mask_t mayday_mask; /* cpus requesting rescue */
234 struct worker *rescuer; /* I: rescue worker */
235
236 int saved_max_active; /* W: saved cwq max_active */
237 const char *name; /* I: workqueue name */
66#ifdef CONFIG_LOCKDEP 238#ifdef CONFIG_LOCKDEP
67 struct lockdep_map lockdep_map; 239 struct lockdep_map lockdep_map;
68#endif 240#endif
69}; 241};
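For context, a hedged sketch of how a workqueue using the new flags might be created, assuming the alloc_workqueue()/destroy_workqueue() interface declared in include/linux/workqueue.h; the name "example", the WQ_HIGHPRI choice and the max_active of 1 are made up:

	static struct workqueue_struct *example_wq;	/* hypothetical */

	static int __init example_init(void)
	{
		/* per-cpu queue, high priority, at most one active work per cpu */
		example_wq = alloc_workqueue("example", WQ_HIGHPRI, 1);
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}

	static void __exit example_exit(void)
	{
		destroy_workqueue(example_wq);
	}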
70 242
243struct workqueue_struct *system_wq __read_mostly;
244struct workqueue_struct *system_long_wq __read_mostly;
245struct workqueue_struct *system_nrt_wq __read_mostly;
246struct workqueue_struct *system_unbound_wq __read_mostly;
247EXPORT_SYMBOL_GPL(system_wq);
248EXPORT_SYMBOL_GPL(system_long_wq);
249EXPORT_SYMBOL_GPL(system_nrt_wq);
250EXPORT_SYMBOL_GPL(system_unbound_wq);
251
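A minimal sketch of queueing onto the shared system workqueue exported above (example_fn and example_work are hypothetical; DECLARE_WORK() and queue_work() come from workqueue.h):

	static void example_fn(struct work_struct *work)
	{
		/* runs in process context on one of the gcwq workers */
	}

	static DECLARE_WORK(example_work, example_fn);

	static void example_kick(void)
	{
		/* system_wq is the shared, always-available per-cpu queue */
		queue_work(system_wq, &example_work);
	}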
252#define for_each_busy_worker(worker, i, pos, gcwq) \
253 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
254 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
255
256static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
257 unsigned int sw)
258{
259 if (cpu < nr_cpu_ids) {
260 if (sw & 1) {
261 cpu = cpumask_next(cpu, mask);
262 if (cpu < nr_cpu_ids)
263 return cpu;
264 }
265 if (sw & 2)
266 return WORK_CPU_UNBOUND;
267 }
268 return WORK_CPU_NONE;
269}
270
271static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
272 struct workqueue_struct *wq)
273{
274 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
275}
276
277/*
278 * CPU iterators
279 *
280 * An extra gcwq is defined for an invalid cpu number
281 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
282 * specific CPU. The following iterators are similar to
283 * for_each_*_cpu() iterators but also considers the unbound gcwq.
284 *
285 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
286 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
287 * for_each_cwq_cpu() : possible CPUs for bound workqueues,
288 * WORK_CPU_UNBOUND for unbound workqueues
289 */
290#define for_each_gcwq_cpu(cpu) \
291 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
292 (cpu) < WORK_CPU_NONE; \
293 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
294
295#define for_each_online_gcwq_cpu(cpu) \
296 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
297 (cpu) < WORK_CPU_NONE; \
298 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
299
300#define for_each_cwq_cpu(cpu, wq) \
301 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
302 (cpu) < WORK_CPU_NONE; \
303 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
304
305#ifdef CONFIG_LOCKDEP
306/**
307 * in_workqueue_context() - in context of specified workqueue?
308 * @wq: the workqueue of interest
309 *
310 * Checks lockdep state to see if the current task is executing from
311 * within a workqueue item. This function exists only if lockdep is
312 * enabled.
313 */
314int in_workqueue_context(struct workqueue_struct *wq)
315{
316 return lock_is_held(&wq->lockdep_map);
317}
318#endif
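A hedged usage sketch for this lockdep-only helper; example_wq and example_work_fn are made-up names:

	static struct workqueue_struct *example_wq;	/* hypothetical */

	static void example_work_fn(struct work_struct *work)
	{
	#ifdef CONFIG_LOCKDEP
		/* only meaningful (and only defined) when lockdep is enabled */
		WARN_ON_ONCE(!in_workqueue_context(example_wq));
	#endif
		/* ... */
	}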
319
71#ifdef CONFIG_DEBUG_OBJECTS_WORK 320#ifdef CONFIG_DEBUG_OBJECTS_WORK
72 321
73static struct debug_obj_descr work_debug_descr; 322static struct debug_obj_descr work_debug_descr;
@@ -107,7 +356,7 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state)
107 * statically initialized. We just make sure that it 356 * statically initialized. We just make sure that it
108 * is tracked in the object tracker. 357 * is tracked in the object tracker.
109 */ 358 */
110 if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { 359 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
111 debug_object_init(work, &work_debug_descr); 360 debug_object_init(work, &work_debug_descr);
112 debug_object_activate(work, &work_debug_descr); 361 debug_object_activate(work, &work_debug_descr);
113 return 0; 362 return 0;
@@ -181,94 +430,575 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
181/* Serializes the accesses to the list of workqueues. */ 430/* Serializes the accesses to the list of workqueues. */
182static DEFINE_SPINLOCK(workqueue_lock); 431static DEFINE_SPINLOCK(workqueue_lock);
183static LIST_HEAD(workqueues); 432static LIST_HEAD(workqueues);
433static bool workqueue_freezing; /* W: have wqs started freezing? */
434
435/*
436 * The almighty global cpu workqueues. nr_running is the only field
437 * which is expected to be used frequently by other cpus via
438 * try_to_wake_up(). Put it in a separate cacheline.
439 */
440static DEFINE_PER_CPU(struct global_cwq, global_cwq);
441static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
184 442
185static int singlethread_cpu __read_mostly;
186static const struct cpumask *cpu_singlethread_map __read_mostly;
187/* 443/*
188 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 444 * Global cpu workqueue and nr_running counter for unbound gcwq. The
189 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work 445 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
190 * which comes in between can't use for_each_online_cpu(). We could 446 * workers have WORKER_UNBOUND set.
191 * use cpu_possible_map, the cpumask below is more a documentation
192 * than optimization.
193 */ 447 */
194static cpumask_var_t cpu_populated_map __read_mostly; 448static struct global_cwq unbound_global_cwq;
449static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */
450
451static int worker_thread(void *__worker);
452
453static struct global_cwq *get_gcwq(unsigned int cpu)
454{
455 if (cpu != WORK_CPU_UNBOUND)
456 return &per_cpu(global_cwq, cpu);
457 else
458 return &unbound_global_cwq;
459}
460
461static atomic_t *get_gcwq_nr_running(unsigned int cpu)
462{
463 if (cpu != WORK_CPU_UNBOUND)
464 return &per_cpu(gcwq_nr_running, cpu);
465 else
466 return &unbound_gcwq_nr_running;
467}
195 468
196/* If it's single threaded, it isn't in the list of workqueues. */ 469static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
197static inline int is_wq_single_threaded(struct workqueue_struct *wq) 470 struct workqueue_struct *wq)
198{ 471{
199 return wq->singlethread; 472 if (!(wq->flags & WQ_UNBOUND)) {
473 if (likely(cpu < nr_cpu_ids)) {
474#ifdef CONFIG_SMP
475 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
476#else
477 return wq->cpu_wq.single;
478#endif
479 }
480 } else if (likely(cpu == WORK_CPU_UNBOUND))
481 return wq->cpu_wq.single;
482 return NULL;
200} 483}
201 484
202static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) 485static unsigned int work_color_to_flags(int color)
203{ 486{
204 return is_wq_single_threaded(wq) 487 return color << WORK_STRUCT_COLOR_SHIFT;
205 ? cpu_singlethread_map : cpu_populated_map;
206} 488}
207 489
208static 490static int get_work_color(struct work_struct *work)
209struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
210{ 491{
211 if (unlikely(is_wq_single_threaded(wq))) 492 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
212 cpu = singlethread_cpu; 493 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
213 return per_cpu_ptr(wq->cpu_wq, cpu); 494}
495
496static int work_next_color(int color)
497{
498 return (color + 1) % WORK_NR_COLORS;
214} 499}
215 500
216/* 501/*
217 * Set the workqueue on which a work item is to be run 502 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
218 * - Must *only* be called if the pending flag is set 503 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is
504 * cleared and the work data contains the cpu number it was last on.
505 *
506 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
507 * cwq, cpu or clear work->data. These functions should only be
508 * called while the work is owned - ie. while the PENDING bit is set.
509 *
510 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
511 * corresponding to a work. gcwq is available once the work has been
512 * queued anywhere after initialization. cwq is available only from
513 * queueing until execution starts.
219 */ 514 */
220static inline void set_wq_data(struct work_struct *work, 515static inline void set_work_data(struct work_struct *work, unsigned long data,
221 struct cpu_workqueue_struct *cwq) 516 unsigned long flags)
222{ 517{
223 unsigned long new;
224
225 BUG_ON(!work_pending(work)); 518 BUG_ON(!work_pending(work));
519 atomic_long_set(&work->data, data | flags | work_static(work));
520}
226 521
227 new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING); 522static void set_work_cwq(struct work_struct *work,
228 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); 523 struct cpu_workqueue_struct *cwq,
229 atomic_long_set(&work->data, new); 524 unsigned long extra_flags)
525{
526 set_work_data(work, (unsigned long)cwq,
527 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
528}
529
530static void set_work_cpu(struct work_struct *work, unsigned int cpu)
531{
532 set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
533}
534
535static void clear_work_data(struct work_struct *work)
536{
537 set_work_data(work, WORK_STRUCT_NO_CPU, 0);
538}
539
540static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
541{
542 unsigned long data = atomic_long_read(&work->data);
543
544 if (data & WORK_STRUCT_CWQ)
545 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
546 else
547 return NULL;
548}
549
550static struct global_cwq *get_work_gcwq(struct work_struct *work)
551{
552 unsigned long data = atomic_long_read(&work->data);
553 unsigned int cpu;
554
555 if (data & WORK_STRUCT_CWQ)
556 return ((struct cpu_workqueue_struct *)
557 (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
558
559 cpu = data >> WORK_STRUCT_FLAG_BITS;
560 if (cpu == WORK_CPU_NONE)
561 return NULL;
562
563 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
564 return get_gcwq(cpu);
565}
566
567/*
568 * Policy functions. These define the policies on how the global
569 * worker pool is managed. Unless noted otherwise, these functions
570 * assume that they're being called with gcwq->lock held.
571 */
572
573static bool __need_more_worker(struct global_cwq *gcwq)
574{
575 return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
576 gcwq->flags & GCWQ_HIGHPRI_PENDING;
230} 577}
231 578
232/* 579/*
233 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued. 580 * Need to wake up a worker? Called from anything but currently
581 * running workers.
234 */ 582 */
235static inline void clear_wq_data(struct work_struct *work) 583static bool need_more_worker(struct global_cwq *gcwq)
236{ 584{
237 unsigned long flags = *work_data_bits(work) & 585 return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
238 (1UL << WORK_STRUCT_STATIC);
239 atomic_long_set(&work->data, flags);
240} 586}
241 587
242static inline 588/* Can I start working? Called from busy but !running workers. */
243struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) 589static bool may_start_working(struct global_cwq *gcwq)
244{ 590{
245 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); 591 return gcwq->nr_idle;
246} 592}
247 593
594/* Do I need to keep working? Called from currently running workers. */
595static bool keep_working(struct global_cwq *gcwq)
596{
597 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
598
599 return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
600}
601
602/* Do we need a new worker? Called from manager. */
603static bool need_to_create_worker(struct global_cwq *gcwq)
604{
605 return need_more_worker(gcwq) && !may_start_working(gcwq);
606}
607
608/* Do I need to be the manager? */
609static bool need_to_manage_workers(struct global_cwq *gcwq)
610{
611 return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
612}
613
614/* Do we have too many workers and should some go away? */
615static bool too_many_workers(struct global_cwq *gcwq)
616{
617 bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
618 int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
619 int nr_busy = gcwq->nr_workers - nr_idle;
620
621 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
622}
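As a worked example of the check above: in a pool of 22 workers with 16 busy, the sixth idle worker trips it (6 > 2 and (6 - 2) * MAX_IDLE_WORKERS_RATIO = 16 >= 16), after which the idle timer is armed and the manager will destroy workers that stay idle past IDLE_WORKER_TIMEOUT.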
623
624/*
625 * Wake up functions.
626 */
627
628/* Return the first worker. Safe with preemption disabled */
629static struct worker *first_worker(struct global_cwq *gcwq)
630{
631 if (unlikely(list_empty(&gcwq->idle_list)))
632 return NULL;
633
634 return list_first_entry(&gcwq->idle_list, struct worker, entry);
635}
636
637/**
638 * wake_up_worker - wake up an idle worker
639 * @gcwq: gcwq to wake worker for
640 *
641 * Wake up the first idle worker of @gcwq.
642 *
643 * CONTEXT:
644 * spin_lock_irq(gcwq->lock).
645 */
646static void wake_up_worker(struct global_cwq *gcwq)
647{
648 struct worker *worker = first_worker(gcwq);
649
650 if (likely(worker))
651 wake_up_process(worker->task);
652}
653
654/**
655 * wq_worker_waking_up - a worker is waking up
656 * @task: task waking up
657 * @cpu: CPU @task is waking up to
658 *
659 * This function is called during try_to_wake_up() when a worker is
660 * being awoken.
661 *
662 * CONTEXT:
663 * spin_lock_irq(rq->lock)
664 */
665void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
666{
667 struct worker *worker = kthread_data(task);
668
669 if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
670 atomic_inc(get_gcwq_nr_running(cpu));
671}
672
673/**
674 * wq_worker_sleeping - a worker is going to sleep
675 * @task: task going to sleep
676 * @cpu: CPU in question, must be the current CPU number
677 *
678 * This function is called during schedule() when a busy worker is
679 * going to sleep. Worker on the same cpu can be woken up by
680 * returning pointer to its task.
681 *
682 * CONTEXT:
683 * spin_lock_irq(rq->lock)
684 *
685 * RETURNS:
686 * Worker task on @cpu to wake up, %NULL if none.
687 */
688struct task_struct *wq_worker_sleeping(struct task_struct *task,
689 unsigned int cpu)
690{
691 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
692 struct global_cwq *gcwq = get_gcwq(cpu);
693 atomic_t *nr_running = get_gcwq_nr_running(cpu);
694
695 if (unlikely(worker->flags & WORKER_NOT_RUNNING))
696 return NULL;
697
698 /* this can only happen on the local cpu */
699 BUG_ON(cpu != raw_smp_processor_id());
700
701 /*
702 * The counterpart of the following dec_and_test, implied mb,
703 * worklist not empty test sequence is in insert_work().
704 * Please read comment there.
705 *
706 * NOT_RUNNING is clear. This means that trustee is not in
707 * charge and we're running on the local cpu w/ rq lock held
 708 * and preemption disabled, which in turn means that no one else
709 * could be manipulating idle_list, so dereferencing idle_list
710 * without gcwq lock is safe.
711 */
712 if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
713 to_wakeup = first_worker(gcwq);
714 return to_wakeup ? to_wakeup->task : NULL;
715}
716
717/**
718 * worker_set_flags - set worker flags and adjust nr_running accordingly
719 * @worker: self
720 * @flags: flags to set
721 * @wakeup: wakeup an idle worker if necessary
722 *
723 * Set @flags in @worker->flags and adjust nr_running accordingly. If
724 * nr_running becomes zero and @wakeup is %true, an idle worker is
725 * woken up.
726 *
727 * CONTEXT:
728 * spin_lock_irq(gcwq->lock)
729 */
730static inline void worker_set_flags(struct worker *worker, unsigned int flags,
731 bool wakeup)
732{
733 struct global_cwq *gcwq = worker->gcwq;
734
735 WARN_ON_ONCE(worker->task != current);
736
737 /*
738 * If transitioning into NOT_RUNNING, adjust nr_running and
739 * wake up an idle worker as necessary if requested by
740 * @wakeup.
741 */
742 if ((flags & WORKER_NOT_RUNNING) &&
743 !(worker->flags & WORKER_NOT_RUNNING)) {
744 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
745
746 if (wakeup) {
747 if (atomic_dec_and_test(nr_running) &&
748 !list_empty(&gcwq->worklist))
749 wake_up_worker(gcwq);
750 } else
751 atomic_dec(nr_running);
752 }
753
754 worker->flags |= flags;
755}
756
757/**
758 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
759 * @worker: self
760 * @flags: flags to clear
761 *
762 * Clear @flags in @worker->flags and adjust nr_running accordingly.
763 *
764 * CONTEXT:
765 * spin_lock_irq(gcwq->lock)
766 */
767static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
768{
769 struct global_cwq *gcwq = worker->gcwq;
770 unsigned int oflags = worker->flags;
771
772 WARN_ON_ONCE(worker->task != current);
773
774 worker->flags &= ~flags;
775
776 /* if transitioning out of NOT_RUNNING, increment nr_running */
777 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
778 if (!(worker->flags & WORKER_NOT_RUNNING))
779 atomic_inc(get_gcwq_nr_running(gcwq->cpu));
780}
781
782/**
783 * busy_worker_head - return the busy hash head for a work
784 * @gcwq: gcwq of interest
785 * @work: work to be hashed
786 *
787 * Return hash head of @gcwq for @work.
788 *
789 * CONTEXT:
790 * spin_lock_irq(gcwq->lock).
791 *
792 * RETURNS:
793 * Pointer to the hash head.
794 */
795static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
796 struct work_struct *work)
797{
798 const int base_shift = ilog2(sizeof(struct work_struct));
799 unsigned long v = (unsigned long)work;
800
801 /* simple shift and fold hash, do we need something better? */
802 v >>= base_shift;
803 v += v >> BUSY_WORKER_HASH_ORDER;
804 v &= BUSY_WORKER_HASH_MASK;
805
806 return &gcwq->busy_hash[v];
807}
808
809/**
810 * __find_worker_executing_work - find worker which is executing a work
811 * @gcwq: gcwq of interest
812 * @bwh: hash head as returned by busy_worker_head()
813 * @work: work to find worker for
814 *
815 * Find a worker which is executing @work on @gcwq. @bwh should be
816 * the hash head obtained by calling busy_worker_head() with the same
817 * work.
818 *
819 * CONTEXT:
820 * spin_lock_irq(gcwq->lock).
821 *
822 * RETURNS:
823 * Pointer to worker which is executing @work if found, NULL
824 * otherwise.
825 */
826static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
827 struct hlist_head *bwh,
828 struct work_struct *work)
829{
830 struct worker *worker;
831 struct hlist_node *tmp;
832
833 hlist_for_each_entry(worker, tmp, bwh, hentry)
834 if (worker->current_work == work)
835 return worker;
836 return NULL;
837}
838
839/**
840 * find_worker_executing_work - find worker which is executing a work
841 * @gcwq: gcwq of interest
842 * @work: work to find worker for
843 *
844 * Find a worker which is executing @work on @gcwq. This function is
845 * identical to __find_worker_executing_work() except that this
846 * function calculates @bwh itself.
847 *
848 * CONTEXT:
849 * spin_lock_irq(gcwq->lock).
850 *
851 * RETURNS:
852 * Pointer to worker which is executing @work if found, NULL
853 * otherwise.
854 */
855static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
856 struct work_struct *work)
857{
858 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
859 work);
860}
861
862/**
863 * gcwq_determine_ins_pos - find insertion position
864 * @gcwq: gcwq of interest
865 * @cwq: cwq a work is being queued for
866 *
867 * A work for @cwq is about to be queued on @gcwq, determine insertion
868 * position for the work. If @cwq is for HIGHPRI wq, the work is
869 * queued at the head of the queue but in FIFO order with respect to
870 * other HIGHPRI works; otherwise, at the end of the queue. This
871 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
872 * there are HIGHPRI works pending.
873 *
874 * CONTEXT:
875 * spin_lock_irq(gcwq->lock).
876 *
877 * RETURNS:
 878 * Pointer to insertion position.
879 */
880static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
881 struct cpu_workqueue_struct *cwq)
882{
883 struct work_struct *twork;
884
885 if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
886 return &gcwq->worklist;
887
888 list_for_each_entry(twork, &gcwq->worklist, entry) {
889 struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
890
891 if (!(tcwq->wq->flags & WQ_HIGHPRI))
892 break;
893 }
894
895 gcwq->flags |= GCWQ_HIGHPRI_PENDING;
896 return &twork->entry;
897}
898
899/**
900 * insert_work - insert a work into gcwq
901 * @cwq: cwq @work belongs to
902 * @work: work to insert
903 * @head: insertion point
904 * @extra_flags: extra WORK_STRUCT_* flags to set
905 *
906 * Insert @work which belongs to @cwq into @gcwq after @head.
907 * @extra_flags is or'd to work_struct flags.
908 *
909 * CONTEXT:
910 * spin_lock_irq(gcwq->lock).
911 */
248static void insert_work(struct cpu_workqueue_struct *cwq, 912static void insert_work(struct cpu_workqueue_struct *cwq,
249 struct work_struct *work, struct list_head *head) 913 struct work_struct *work, struct list_head *head,
914 unsigned int extra_flags)
250{ 915{
251 trace_workqueue_insertion(cwq->thread, work); 916 struct global_cwq *gcwq = cwq->gcwq;
917
918 /* we own @work, set data and link */
919 set_work_cwq(work, cwq, extra_flags);
252 920
253 set_wq_data(work, cwq);
254 /* 921 /*
255 * Ensure that we get the right work->data if we see the 922 * Ensure that we get the right work->data if we see the
256 * result of list_add() below, see try_to_grab_pending(). 923 * result of list_add() below, see try_to_grab_pending().
257 */ 924 */
258 smp_wmb(); 925 smp_wmb();
926
259 list_add_tail(&work->entry, head); 927 list_add_tail(&work->entry, head);
260 wake_up(&cwq->more_work); 928
929 /*
930 * Ensure either worker_sched_deactivated() sees the above
931 * list_add_tail() or we see zero nr_running to avoid workers
932 * lying around lazily while there are works to be processed.
933 */
934 smp_mb();
935
936 if (__need_more_worker(gcwq))
937 wake_up_worker(gcwq);
261} 938}
262 939
263static void __queue_work(struct cpu_workqueue_struct *cwq, 940static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
264 struct work_struct *work) 941 struct work_struct *work)
265{ 942{
943 struct global_cwq *gcwq;
944 struct cpu_workqueue_struct *cwq;
945 struct list_head *worklist;
266 unsigned long flags; 946 unsigned long flags;
267 947
268 debug_work_activate(work); 948 debug_work_activate(work);
269 spin_lock_irqsave(&cwq->lock, flags); 949
270 insert_work(cwq, work, &cwq->worklist); 950 /* determine gcwq to use */
271 spin_unlock_irqrestore(&cwq->lock, flags); 951 if (!(wq->flags & WQ_UNBOUND)) {
952 struct global_cwq *last_gcwq;
953
954 if (unlikely(cpu == WORK_CPU_UNBOUND))
955 cpu = raw_smp_processor_id();
956
957 /*
958 * It's multi cpu. If @wq is non-reentrant and @work
959 * was previously on a different cpu, it might still
960 * be running there, in which case the work needs to
961 * be queued on that cpu to guarantee non-reentrance.
962 */
963 gcwq = get_gcwq(cpu);
964 if (wq->flags & WQ_NON_REENTRANT &&
965 (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
966 struct worker *worker;
967
968 spin_lock_irqsave(&last_gcwq->lock, flags);
969
970 worker = find_worker_executing_work(last_gcwq, work);
971
972 if (worker && worker->current_cwq->wq == wq)
973 gcwq = last_gcwq;
974 else {
975 /* meh... not running there, queue here */
976 spin_unlock_irqrestore(&last_gcwq->lock, flags);
977 spin_lock_irqsave(&gcwq->lock, flags);
978 }
979 } else
980 spin_lock_irqsave(&gcwq->lock, flags);
981 } else {
982 gcwq = get_gcwq(WORK_CPU_UNBOUND);
983 spin_lock_irqsave(&gcwq->lock, flags);
984 }
985
986 /* gcwq determined, get cwq and queue */
987 cwq = get_cwq(gcwq->cpu, wq);
988
989 BUG_ON(!list_empty(&work->entry));
990
991 cwq->nr_in_flight[cwq->work_color]++;
992
993 if (likely(cwq->nr_active < cwq->max_active)) {
994 cwq->nr_active++;
995 worklist = gcwq_determine_ins_pos(gcwq, cwq);
996 } else
997 worklist = &cwq->delayed_works;
998
999 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
1000
1001 spin_unlock_irqrestore(&gcwq->lock, flags);
272} 1002}
273 1003
274/** 1004/**
@@ -308,9 +1038,8 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
308{ 1038{
309 int ret = 0; 1039 int ret = 0;
310 1040
311 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 1041 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
312 BUG_ON(!list_empty(&work->entry)); 1042 __queue_work(cpu, wq, work);
313 __queue_work(wq_per_cpu(wq, cpu), work);
314 ret = 1; 1043 ret = 1;
315 } 1044 }
316 return ret; 1045 return ret;
@@ -320,10 +1049,9 @@ EXPORT_SYMBOL_GPL(queue_work_on);
320static void delayed_work_timer_fn(unsigned long __data) 1049static void delayed_work_timer_fn(unsigned long __data)
321{ 1050{
322 struct delayed_work *dwork = (struct delayed_work *)__data; 1051 struct delayed_work *dwork = (struct delayed_work *)__data;
323 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); 1052 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
324 struct workqueue_struct *wq = cwq->wq;
325 1053
326 __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work); 1054 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
327} 1055}
328 1056
329/** 1057/**
@@ -360,14 +1088,31 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
360 struct timer_list *timer = &dwork->timer; 1088 struct timer_list *timer = &dwork->timer;
361 struct work_struct *work = &dwork->work; 1089 struct work_struct *work = &dwork->work;
362 1090
363 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 1091 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1092 unsigned int lcpu;
1093
364 BUG_ON(timer_pending(timer)); 1094 BUG_ON(timer_pending(timer));
365 BUG_ON(!list_empty(&work->entry)); 1095 BUG_ON(!list_empty(&work->entry));
366 1096
367 timer_stats_timer_set_start_info(&dwork->timer); 1097 timer_stats_timer_set_start_info(&dwork->timer);
368 1098
369 /* This stores cwq for the moment, for the timer_fn */ 1099 /*
370 set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); 1100 * This stores cwq for the moment, for the timer_fn.
1101 * Note that the work's gcwq is preserved to allow
1102 * reentrance detection for delayed works.
1103 */
1104 if (!(wq->flags & WQ_UNBOUND)) {
1105 struct global_cwq *gcwq = get_work_gcwq(work);
1106
1107 if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1108 lcpu = gcwq->cpu;
1109 else
1110 lcpu = raw_smp_processor_id();
1111 } else
1112 lcpu = WORK_CPU_UNBOUND;
1113
1114 set_work_cwq(work, get_cwq(lcpu, wq), 0);
1115
371 timer->expires = jiffies + delay; 1116 timer->expires = jiffies + delay;
372 timer->data = (unsigned long)dwork; 1117 timer->data = (unsigned long)dwork;
373 timer->function = delayed_work_timer_fn; 1118 timer->function = delayed_work_timer_fn;
@@ -382,80 +1127,878 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
382} 1127}
383EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1128EXPORT_SYMBOL_GPL(queue_delayed_work_on);
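A minimal delayed-work sketch (illustrative only; the names and the 2 * HZ delay are made up):

	static void example_timeout_fn(struct work_struct *work)
	{
		/* runs roughly two seconds after example_arm() */
	}

	static DECLARE_DELAYED_WORK(example_dwork, example_timeout_fn);

	static void example_arm(void)
	{
		/* the timer fires delayed_work_timer_fn() which calls __queue_work() */
		queue_delayed_work(system_wq, &example_dwork, 2 * HZ);
	}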
384 1129
385static void run_workqueue(struct cpu_workqueue_struct *cwq) 1130/**
1131 * worker_enter_idle - enter idle state
1132 * @worker: worker which is entering idle state
1133 *
1134 * @worker is entering idle state. Update stats and idle timer if
1135 * necessary.
1136 *
1137 * LOCKING:
1138 * spin_lock_irq(gcwq->lock).
1139 */
1140static void worker_enter_idle(struct worker *worker)
386{ 1141{
387 spin_lock_irq(&cwq->lock); 1142 struct global_cwq *gcwq = worker->gcwq;
388 while (!list_empty(&cwq->worklist)) { 1143
389 struct work_struct *work = list_entry(cwq->worklist.next, 1144 BUG_ON(worker->flags & WORKER_IDLE);
390 struct work_struct, entry); 1145 BUG_ON(!list_empty(&worker->entry) &&
391 work_func_t f = work->func; 1146 (worker->hentry.next || worker->hentry.pprev));
392#ifdef CONFIG_LOCKDEP 1147
1148 /* can't use worker_set_flags(), also called from start_worker() */
1149 worker->flags |= WORKER_IDLE;
1150 gcwq->nr_idle++;
1151 worker->last_active = jiffies;
1152
1153 /* idle_list is LIFO */
1154 list_add(&worker->entry, &gcwq->idle_list);
1155
1156 if (likely(!(worker->flags & WORKER_ROGUE))) {
1157 if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1158 mod_timer(&gcwq->idle_timer,
1159 jiffies + IDLE_WORKER_TIMEOUT);
1160 } else
1161 wake_up_all(&gcwq->trustee_wait);
1162
1163 /* sanity check nr_running */
1164 WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1165 atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1166}
1167
1168/**
1169 * worker_leave_idle - leave idle state
1170 * @worker: worker which is leaving idle state
1171 *
1172 * @worker is leaving idle state. Update stats.
1173 *
1174 * LOCKING:
1175 * spin_lock_irq(gcwq->lock).
1176 */
1177static void worker_leave_idle(struct worker *worker)
1178{
1179 struct global_cwq *gcwq = worker->gcwq;
1180
1181 BUG_ON(!(worker->flags & WORKER_IDLE));
1182 worker_clr_flags(worker, WORKER_IDLE);
1183 gcwq->nr_idle--;
1184 list_del_init(&worker->entry);
1185}
1186
1187/**
1188 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1189 * @worker: self
1190 *
1191 * Works which are scheduled while the cpu is online must at least be
1192 * scheduled to a worker which is bound to the cpu so that if they are
1193 * flushed from cpu callbacks while cpu is going down, they are
1194 * guaranteed to execute on the cpu.
1195 *
1196 * This function is to be used by rogue workers and rescuers to bind
1197 * themselves to the target cpu and may race with cpu going down or
1198 * coming online. kthread_bind() can't be used because it may put the
 1199 * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used
1200 * verbatim as it's best effort and blocking and gcwq may be
1201 * [dis]associated in the meantime.
1202 *
1203 * This function tries set_cpus_allowed() and locks gcwq and verifies
1204 * the binding against GCWQ_DISASSOCIATED which is set during
1205 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1206 * idle state or fetches works without dropping lock, it can guarantee
1207 * the scheduling requirement described in the first paragraph.
1208 *
1209 * CONTEXT:
1210 * Might sleep. Called without any lock but returns with gcwq->lock
1211 * held.
1212 *
1213 * RETURNS:
1214 * %true if the associated gcwq is online (@worker is successfully
1215 * bound), %false if offline.
1216 */
1217static bool worker_maybe_bind_and_lock(struct worker *worker)
1218{
1219 struct global_cwq *gcwq = worker->gcwq;
1220 struct task_struct *task = worker->task;
1221
1222 while (true) {
393 /* 1223 /*
394 * It is permissible to free the struct work_struct 1224 * The following call may fail, succeed or succeed
395 * from inside the function that is called from it, 1225 * without actually migrating the task to the cpu if
396 * this we need to take into account for lockdep too. 1226 * it races with cpu hotunplug operation. Verify
397 * To avoid bogus "held lock freed" warnings as well 1227 * against GCWQ_DISASSOCIATED.
398 * as problems when looking into work->lockdep_map,
399 * make a copy and use that here.
400 */ 1228 */
401 struct lockdep_map lockdep_map = work->lockdep_map; 1229 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
402#endif 1230 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
403 trace_workqueue_execution(cwq->thread, work); 1231
404 debug_work_deactivate(work); 1232 spin_lock_irq(&gcwq->lock);
405 cwq->current_work = work; 1233 if (gcwq->flags & GCWQ_DISASSOCIATED)
406 list_del_init(cwq->worklist.next); 1234 return false;
407 spin_unlock_irq(&cwq->lock); 1235 if (task_cpu(task) == gcwq->cpu &&
408 1236 cpumask_equal(&current->cpus_allowed,
409 BUG_ON(get_wq_data(work) != cwq); 1237 get_cpu_mask(gcwq->cpu)))
410 work_clear_pending(work); 1238 return true;
411 lock_map_acquire(&cwq->wq->lockdep_map); 1239 spin_unlock_irq(&gcwq->lock);
412 lock_map_acquire(&lockdep_map); 1240
413 f(work); 1241 /* CPU has come up inbetween, retry migration */
414 lock_map_release(&lockdep_map); 1242 cpu_relax();
415 lock_map_release(&cwq->wq->lockdep_map); 1243 }
416 1244}
417 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 1245
418 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 1246/*
419 "%s/0x%08x/%d\n", 1247 * Function for worker->rebind_work used to rebind rogue busy workers
420 current->comm, preempt_count(), 1248 * to the associated cpu which is coming back online. This is
421 task_pid_nr(current)); 1249 * scheduled by cpu up but can race with other cpu hotplug operations
422 printk(KERN_ERR " last function: "); 1250 * and may be executed twice without intervening cpu down.
423 print_symbol("%s\n", (unsigned long)f); 1251 */
424 debug_show_held_locks(current); 1252static void worker_rebind_fn(struct work_struct *work)
425 dump_stack(); 1253{
1254 struct worker *worker = container_of(work, struct worker, rebind_work);
1255 struct global_cwq *gcwq = worker->gcwq;
1256
1257 if (worker_maybe_bind_and_lock(worker))
1258 worker_clr_flags(worker, WORKER_REBIND);
1259
1260 spin_unlock_irq(&gcwq->lock);
1261}
1262
1263static struct worker *alloc_worker(void)
1264{
1265 struct worker *worker;
1266
1267 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1268 if (worker) {
1269 INIT_LIST_HEAD(&worker->entry);
1270 INIT_LIST_HEAD(&worker->scheduled);
1271 INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1272 /* on creation a worker is in !idle && prep state */
1273 worker->flags = WORKER_PREP;
1274 }
1275 return worker;
1276}
1277
1278/**
1279 * create_worker - create a new workqueue worker
1280 * @gcwq: gcwq the new worker will belong to
1281 * @bind: whether to set affinity to @cpu or not
1282 *
1283 * Create a new worker which is bound to @gcwq. The returned worker
1284 * can be started by calling start_worker() or destroyed using
1285 * destroy_worker().
1286 *
1287 * CONTEXT:
1288 * Might sleep. Does GFP_KERNEL allocations.
1289 *
1290 * RETURNS:
1291 * Pointer to the newly created worker.
1292 */
1293static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1294{
1295 bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1296 struct worker *worker = NULL;
1297 int id = -1;
1298
1299 spin_lock_irq(&gcwq->lock);
1300 while (ida_get_new(&gcwq->worker_ida, &id)) {
1301 spin_unlock_irq(&gcwq->lock);
1302 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1303 goto fail;
1304 spin_lock_irq(&gcwq->lock);
1305 }
1306 spin_unlock_irq(&gcwq->lock);
1307
1308 worker = alloc_worker();
1309 if (!worker)
1310 goto fail;
1311
1312 worker->gcwq = gcwq;
1313 worker->id = id;
1314
1315 if (!on_unbound_cpu)
1316 worker->task = kthread_create(worker_thread, worker,
1317 "kworker/%u:%d", gcwq->cpu, id);
1318 else
1319 worker->task = kthread_create(worker_thread, worker,
1320 "kworker/u:%d", id);
1321 if (IS_ERR(worker->task))
1322 goto fail;
1323
1324 /*
1325 * A rogue worker will become a regular one if CPU comes
1326 * online later on. Make sure every worker has
1327 * PF_THREAD_BOUND set.
1328 */
1329 if (bind && !on_unbound_cpu)
1330 kthread_bind(worker->task, gcwq->cpu);
1331 else {
1332 worker->task->flags |= PF_THREAD_BOUND;
1333 if (on_unbound_cpu)
1334 worker->flags |= WORKER_UNBOUND;
1335 }
1336
1337 return worker;
1338fail:
1339 if (id >= 0) {
1340 spin_lock_irq(&gcwq->lock);
1341 ida_remove(&gcwq->worker_ida, id);
1342 spin_unlock_irq(&gcwq->lock);
1343 }
1344 kfree(worker);
1345 return NULL;
1346}
1347
1348/**
1349 * start_worker - start a newly created worker
1350 * @worker: worker to start
1351 *
1352 * Make the gcwq aware of @worker and start it.
1353 *
1354 * CONTEXT:
1355 * spin_lock_irq(gcwq->lock).
1356 */
1357static void start_worker(struct worker *worker)
1358{
1359 worker->flags |= WORKER_STARTED;
1360 worker->gcwq->nr_workers++;
1361 worker_enter_idle(worker);
1362 wake_up_process(worker->task);
1363}
1364
1365/**
1366 * destroy_worker - destroy a workqueue worker
1367 * @worker: worker to be destroyed
1368 *
1369 * Destroy @worker and adjust @gcwq stats accordingly.
1370 *
1371 * CONTEXT:
1372 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1373 */
1374static void destroy_worker(struct worker *worker)
1375{
1376 struct global_cwq *gcwq = worker->gcwq;
1377 int id = worker->id;
1378
1379 /* sanity check frenzy */
1380 BUG_ON(worker->current_work);
1381 BUG_ON(!list_empty(&worker->scheduled));
1382
1383 if (worker->flags & WORKER_STARTED)
1384 gcwq->nr_workers--;
1385 if (worker->flags & WORKER_IDLE)
1386 gcwq->nr_idle--;
1387
1388 list_del_init(&worker->entry);
1389 worker->flags |= WORKER_DIE;
1390
1391 spin_unlock_irq(&gcwq->lock);
1392
1393 kthread_stop(worker->task);
1394 kfree(worker);
1395
1396 spin_lock_irq(&gcwq->lock);
1397 ida_remove(&gcwq->worker_ida, id);
1398}
1399
1400static void idle_worker_timeout(unsigned long __gcwq)
1401{
1402 struct global_cwq *gcwq = (void *)__gcwq;
1403
1404 spin_lock_irq(&gcwq->lock);
1405
1406 if (too_many_workers(gcwq)) {
1407 struct worker *worker;
1408 unsigned long expires;
1409
1410 /* idle_list is kept in LIFO order, check the last one */
1411 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1412 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1413
1414 if (time_before(jiffies, expires))
1415 mod_timer(&gcwq->idle_timer, expires);
1416 else {
1417 /* it's been idle for too long, wake up manager */
1418 gcwq->flags |= GCWQ_MANAGE_WORKERS;
1419 wake_up_worker(gcwq);
426 } 1420 }
1421 }
427 1422
428 spin_lock_irq(&cwq->lock); 1423 spin_unlock_irq(&gcwq->lock);
429 cwq->current_work = NULL; 1424}
1425
1426static bool send_mayday(struct work_struct *work)
1427{
1428 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1429 struct workqueue_struct *wq = cwq->wq;
1430 unsigned int cpu;
1431
1432 if (!(wq->flags & WQ_RESCUER))
1433 return false;
1434
1435 /* mayday mayday mayday */
1436 cpu = cwq->gcwq->cpu;
1437 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1438 if (cpu == WORK_CPU_UNBOUND)
1439 cpu = 0;
1440 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1441 wake_up_process(wq->rescuer->task);
1442 return true;
1443}
1444
1445static void gcwq_mayday_timeout(unsigned long __gcwq)
1446{
1447 struct global_cwq *gcwq = (void *)__gcwq;
1448 struct work_struct *work;
1449
1450 spin_lock_irq(&gcwq->lock);
1451
1452 if (need_to_create_worker(gcwq)) {
1453 /*
1454 * We've been trying to create a new worker but
1455 * haven't been successful. We might be hitting an
1456 * allocation deadlock. Send distress signals to
1457 * rescuers.
1458 */
1459 list_for_each_entry(work, &gcwq->worklist, entry)
1460 send_mayday(work);
430 } 1461 }
431 spin_unlock_irq(&cwq->lock); 1462
1463 spin_unlock_irq(&gcwq->lock);
1464
1465 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
432} 1466}
433 1467
434static int worker_thread(void *__cwq) 1468/**
1469 * maybe_create_worker - create a new worker if necessary
1470 * @gcwq: gcwq to create a new worker for
1471 *
1472 * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
1473 * have at least one idle worker on return from this function. If
1474 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1475 * sent to all rescuers with works scheduled on @gcwq to resolve
1476 * possible allocation deadlock.
1477 *
1478 * On return, need_to_create_worker() is guaranteed to be false and
1479 * may_start_working() true.
1480 *
1481 * LOCKING:
1482 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1483 * multiple times. Does GFP_KERNEL allocations. Called only from
1484 * manager.
1485 *
1486 * RETURNS:
1487 * false if no action was taken and gcwq->lock stayed locked, true
1488 * otherwise.
1489 */
1490static bool maybe_create_worker(struct global_cwq *gcwq)
435{ 1491{
436 struct cpu_workqueue_struct *cwq = __cwq; 1492 if (!need_to_create_worker(gcwq))
437 DEFINE_WAIT(wait); 1493 return false;
1494restart:
1495 spin_unlock_irq(&gcwq->lock);
1496
1497 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1498 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1499
1500 while (true) {
1501 struct worker *worker;
1502
1503 worker = create_worker(gcwq, true);
1504 if (worker) {
1505 del_timer_sync(&gcwq->mayday_timer);
1506 spin_lock_irq(&gcwq->lock);
1507 start_worker(worker);
1508 BUG_ON(need_to_create_worker(gcwq));
1509 return true;
1510 }
1511
1512 if (!need_to_create_worker(gcwq))
1513 break;
438 1514
439 if (cwq->wq->freezeable) 1515 __set_current_state(TASK_INTERRUPTIBLE);
440 set_freezable(); 1516 schedule_timeout(CREATE_COOLDOWN);
441 1517
442 for (;;) { 1518 if (!need_to_create_worker(gcwq))
443 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); 1519 break;
444 if (!freezing(current) && 1520 }
445 !kthread_should_stop() &&
446 list_empty(&cwq->worklist))
447 schedule();
448 finish_wait(&cwq->more_work, &wait);
449 1521
450 try_to_freeze(); 1522 del_timer_sync(&gcwq->mayday_timer);
1523 spin_lock_irq(&gcwq->lock);
1524 if (need_to_create_worker(gcwq))
1525 goto restart;
1526 return true;
1527}
451 1528
452 if (kthread_should_stop()) 1529/**
1530 * maybe_destroy_worker - destroy workers which have been idle for a while
1531 * @gcwq: gcwq to destroy workers for
1532 *
1533 * Destroy @gcwq workers which have been idle for longer than
1534 * IDLE_WORKER_TIMEOUT.
1535 *
1536 * LOCKING:
1537 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1538 * multiple times. Called only from manager.
1539 *
1540 * RETURNS:
1541 * false if no action was taken and gcwq->lock stayed locked, true
1542 * otherwise.
1543 */
1544static bool maybe_destroy_workers(struct global_cwq *gcwq)
1545{
1546 bool ret = false;
1547
1548 while (too_many_workers(gcwq)) {
1549 struct worker *worker;
1550 unsigned long expires;
1551
1552 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1553 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1554
1555 if (time_before(jiffies, expires)) {
1556 mod_timer(&gcwq->idle_timer, expires);
453 break; 1557 break;
1558 }
454 1559
455 run_workqueue(cwq); 1560 destroy_worker(worker);
1561 ret = true;
456 } 1562 }
457 1563
458 return 0; 1564 return ret;
1565}
1566
1567/**
1568 * manage_workers - manage worker pool
1569 * @worker: self
1570 *
1571 * Assume the manager role and manage gcwq worker pool @worker belongs
1572 * to. At any given time, there can be only zero or one manager per
1573 * gcwq. The exclusion is handled automatically by this function.
1574 *
1575 * The caller can safely start processing works on false return. On
1576 * true return, it's guaranteed that need_to_create_worker() is false
1577 * and may_start_working() is true.
1578 *
1579 * CONTEXT:
1580 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1581 * multiple times. Does GFP_KERNEL allocations.
1582 *
1583 * RETURNS:
1584 * false if no action was taken and gcwq->lock stayed locked, true if
1585 * some action was taken.
1586 */
1587static bool manage_workers(struct worker *worker)
1588{
1589 struct global_cwq *gcwq = worker->gcwq;
1590 bool ret = false;
1591
1592 if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1593 return ret;
1594
1595 gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1596 gcwq->flags |= GCWQ_MANAGING_WORKERS;
1597
1598 /*
1599 * Destroy and then create so that may_start_working() is true
1600 * on return.
1601 */
1602 ret |= maybe_destroy_workers(gcwq);
1603 ret |= maybe_create_worker(gcwq);
1604
1605 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1606
1607 /*
1608 * The trustee might be waiting to take over the manager
1609 * position, tell it we're done.
1610 */
1611 if (unlikely(gcwq->trustee))
1612 wake_up_all(&gcwq->trustee_wait);
1613
1614 return ret;
1615}
1616
1617/**
1618 * move_linked_works - move linked works to a list
1619 * @work: start of series of works to be scheduled
1620 * @head: target list to append @work to
 1621 * @nextp: out parameter for nested worklist walking
1622 *
1623 * Schedule linked works starting from @work to @head. Work series to
1624 * be scheduled starts at @work and includes any consecutive work with
1625 * WORK_STRUCT_LINKED set in its predecessor.
1626 *
1627 * If @nextp is not NULL, it's updated to point to the next work of
1628 * the last scheduled work. This allows move_linked_works() to be
1629 * nested inside outer list_for_each_entry_safe().
1630 *
1631 * CONTEXT:
1632 * spin_lock_irq(gcwq->lock).
1633 */
1634static void move_linked_works(struct work_struct *work, struct list_head *head,
1635 struct work_struct **nextp)
1636{
1637 struct work_struct *n;
1638
1639 /*
1640 * Linked worklist will always end before the end of the list,
1641 * use NULL for list head.
1642 */
1643 list_for_each_entry_safe_from(work, n, NULL, entry) {
1644 list_move_tail(&work->entry, head);
1645 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1646 break;
1647 }
1648
1649 /*
1650 * If we're already inside safe list traversal and have moved
1651 * multiple works to the scheduled queue, the next position
1652 * needs to be updated.
1653 */
1654 if (nextp)
1655 *nextp = n;
1656}
1657
1658static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1659{
1660 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1661 struct work_struct, entry);
1662 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1663
1664 move_linked_works(work, pos, NULL);
1665 cwq->nr_active++;
1666}
1667
1668/**
1669 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1670 * @cwq: cwq of interest
1671 * @color: color of work which left the queue
1672 *
1673 * A work either has completed or is removed from pending queue,
1674 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1675 *
1676 * CONTEXT:
1677 * spin_lock_irq(gcwq->lock).
1678 */
1679static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1680{
1681 /* ignore uncolored works */
1682 if (color == WORK_NO_COLOR)
1683 return;
1684
1685 cwq->nr_in_flight[color]--;
1686 cwq->nr_active--;
1687
1688 if (!list_empty(&cwq->delayed_works)) {
1689 /* one down, submit a delayed one */
1690 if (cwq->nr_active < cwq->max_active)
1691 cwq_activate_first_delayed(cwq);
1692 }
1693
1694 /* is flush in progress and are we at the flushing tip? */
1695 if (likely(cwq->flush_color != color))
1696 return;
1697
1698 /* are there still in-flight works? */
1699 if (cwq->nr_in_flight[color])
1700 return;
1701
1702 /* this cwq is done, clear flush_color */
1703 cwq->flush_color = -1;
1704
1705 /*
1706 * If this was the last cwq, wake up the first flusher. It
1707 * will handle the rest.
1708 */
1709 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1710 complete(&cwq->wq->first_flusher->done);
1711}
1712
1713/**
1714 * process_one_work - process single work
1715 * @worker: self
1716 * @work: work to process
1717 *
 1718 * Process @work. This function contains all the logic necessary to
1719 * process a single work including synchronization against and
1720 * interaction with other workers on the same cpu, queueing and
1721 * flushing. As long as context requirement is met, any worker can
1722 * call this function to process a work.
1723 *
1724 * CONTEXT:
1725 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1726 */
1727static void process_one_work(struct worker *worker, struct work_struct *work)
1728{
1729 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1730 struct global_cwq *gcwq = cwq->gcwq;
1731 struct hlist_head *bwh = busy_worker_head(gcwq, work);
1732 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1733 work_func_t f = work->func;
1734 int work_color;
1735 struct worker *collision;
1736#ifdef CONFIG_LOCKDEP
1737 /*
1738 * It is permissible to free the struct work_struct from
1739 * inside the function that is called from it, this we need to
1740 * take into account for lockdep too. To avoid bogus "held
1741 * lock freed" warnings as well as problems when looking into
1742 * work->lockdep_map, make a copy and use that here.
1743 */
1744 struct lockdep_map lockdep_map = work->lockdep_map;
1745#endif
1746 /*
1747 * A single work shouldn't be executed concurrently by
1748 * multiple workers on a single cpu. Check whether anyone is
1749 * already processing the work. If so, defer the work to the
1750 * currently executing one.
1751 */
1752 collision = __find_worker_executing_work(gcwq, bwh, work);
1753 if (unlikely(collision)) {
1754 move_linked_works(work, &collision->scheduled, NULL);
1755 return;
1756 }
1757
1758 /* claim and process */
1759 debug_work_deactivate(work);
1760 hlist_add_head(&worker->hentry, bwh);
1761 worker->current_work = work;
1762 worker->current_cwq = cwq;
1763 work_color = get_work_color(work);
1764
1765 /* record the current cpu number in the work data and dequeue */
1766 set_work_cpu(work, gcwq->cpu);
1767 list_del_init(&work->entry);
1768
1769 /*
1770 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1771 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1772 */
1773 if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1774 struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1775 struct work_struct, entry);
1776
1777 if (!list_empty(&gcwq->worklist) &&
1778 get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1779 wake_up_worker(gcwq);
1780 else
1781 gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1782 }
1783
1784 /*
1785 * CPU intensive works don't participate in concurrency
1786 * management. They're the scheduler's responsibility.
1787 */
1788 if (unlikely(cpu_intensive))
1789 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1790
1791 spin_unlock_irq(&gcwq->lock);
1792
1793 work_clear_pending(work);
1794 lock_map_acquire(&cwq->wq->lockdep_map);
1795 lock_map_acquire(&lockdep_map);
1796 trace_workqueue_execute_start(work);
1797 f(work);
1798 /*
1799 * While we must be careful to not use "work" after this, the trace
1800 * point will only record its address.
1801 */
1802 trace_workqueue_execute_end(work);
1803 lock_map_release(&lockdep_map);
1804 lock_map_release(&cwq->wq->lockdep_map);
1805
1806 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1807 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1808 "%s/0x%08x/%d\n",
1809 current->comm, preempt_count(), task_pid_nr(current));
1810 printk(KERN_ERR " last function: ");
1811 print_symbol("%s\n", (unsigned long)f);
1812 debug_show_held_locks(current);
1813 dump_stack();
1814 }
1815
1816 spin_lock_irq(&gcwq->lock);
1817
1818 /* clear cpu intensive status */
1819 if (unlikely(cpu_intensive))
1820 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1821
1822 /* we're done with it, release */
1823 hlist_del_init(&worker->hentry);
1824 worker->current_work = NULL;
1825 worker->current_cwq = NULL;
1826 cwq_dec_nr_in_flight(cwq, work_color);
1827}
1828
1829/**
1830 * process_scheduled_works - process scheduled works
1831 * @worker: self
1832 *
1833 * Process all scheduled works. Please note that the scheduled list
1834 * may change while processing a work, so this function repeatedly
1835 * fetches a work from the top and executes it.
1836 *
1837 * CONTEXT:
1838 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1839 * multiple times.
1840 */
1841static void process_scheduled_works(struct worker *worker)
1842{
1843 while (!list_empty(&worker->scheduled)) {
1844 struct work_struct *work = list_first_entry(&worker->scheduled,
1845 struct work_struct, entry);
1846 process_one_work(worker, work);
1847 }
1848}
1849
1850/**
1851 * worker_thread - the worker thread function
1852 * @__worker: self
1853 *
1854 * The gcwq worker thread function. There's a single dynamic pool of
1855 * these per cpu. These workers process all works regardless of
1856 * their specific target workqueue. The only exception is works which
1857 * belong to workqueues with a rescuer, which is explained in
1858 * rescuer_thread().
1859 */
1860static int worker_thread(void *__worker)
1861{
1862 struct worker *worker = __worker;
1863 struct global_cwq *gcwq = worker->gcwq;
1864
1865 /* tell the scheduler that this is a workqueue worker */
1866 worker->task->flags |= PF_WQ_WORKER;
1867woke_up:
1868 spin_lock_irq(&gcwq->lock);
1869
1870 /* DIE can be set only while we're idle, checking here is enough */
1871 if (worker->flags & WORKER_DIE) {
1872 spin_unlock_irq(&gcwq->lock);
1873 worker->task->flags &= ~PF_WQ_WORKER;
1874 return 0;
1875 }
1876
1877 worker_leave_idle(worker);
1878recheck:
1879 /* no more worker necessary? */
1880 if (!need_more_worker(gcwq))
1881 goto sleep;
1882
1883 /* do we need to manage? */
1884 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1885 goto recheck;
1886
1887 /*
1888 * ->scheduled list can only be filled while a worker is
1889 * preparing to process a work or actually processing it.
1890 * Make sure nobody diddled with it while I was sleeping.
1891 */
1892 BUG_ON(!list_empty(&worker->scheduled));
1893
1894 /*
1895 * When control reaches this point, we're guaranteed to have
1896 * at least one idle worker or that someone else has already
1897 * assumed the manager role.
1898 */
1899 worker_clr_flags(worker, WORKER_PREP);
1900
1901 do {
1902 struct work_struct *work =
1903 list_first_entry(&gcwq->worklist,
1904 struct work_struct, entry);
1905
1906 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1907 /* optimization path, not strictly necessary */
1908 process_one_work(worker, work);
1909 if (unlikely(!list_empty(&worker->scheduled)))
1910 process_scheduled_works(worker);
1911 } else {
1912 move_linked_works(work, &worker->scheduled, NULL);
1913 process_scheduled_works(worker);
1914 }
1915 } while (keep_working(gcwq));
1916
1917 worker_set_flags(worker, WORKER_PREP, false);
1918sleep:
1919 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1920 goto recheck;
1921
1922 /*
1923 * gcwq->lock is held, there's no work to process and no
1924 * need to manage, so sleep. Workers are woken up only while
1925 * holding gcwq->lock or from the local cpu, so setting the
1926 * current state before releasing gcwq->lock is enough to
1927 * prevent losing any event.
1928 */
1929 worker_enter_idle(worker);
1930 __set_current_state(TASK_INTERRUPTIBLE);
1931 spin_unlock_irq(&gcwq->lock);
1932 schedule();
1933 goto woke_up;
1934}
1935
1936/**
1937 * rescuer_thread - the rescuer thread function
1938 * @__wq: the associated workqueue
1939 *
1940 * Workqueue rescuer thread function. There's one rescuer for each
1941 * workqueue which has WQ_RESCUER set.
1942 *
1943 * Regular work processing on a gcwq may block trying to create a new
1944 * worker, which uses a GFP_KERNEL allocation that has a slight chance
1945 * of developing into a deadlock if some works currently on the same
1946 * queue need to be processed to satisfy the GFP_KERNEL allocation.
1947 * This is the problem the rescuer solves.
1948 *
1949 * When such a condition is possible, the gcwq summons the rescuers of
1950 * all workqueues which have works queued on the gcwq and lets them
1951 * process those works so that forward progress can be guaranteed.
1952 *
1953 * This should happen rarely.
1954 */
1955static int rescuer_thread(void *__wq)
1956{
1957 struct workqueue_struct *wq = __wq;
1958 struct worker *rescuer = wq->rescuer;
1959 struct list_head *scheduled = &rescuer->scheduled;
1960 bool is_unbound = wq->flags & WQ_UNBOUND;
1961 unsigned int cpu;
1962
1963 set_user_nice(current, RESCUER_NICE_LEVEL);
1964repeat:
1965 set_current_state(TASK_INTERRUPTIBLE);
1966
1967 if (kthread_should_stop())
1968 return 0;
1969
1970 /*
1971 * See whether any cpu is asking for help. Unbound
1972 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
1973 */
1974 for_each_mayday_cpu(cpu, wq->mayday_mask) {
1975 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1976 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1977 struct global_cwq *gcwq = cwq->gcwq;
1978 struct work_struct *work, *n;
1979
1980 __set_current_state(TASK_RUNNING);
1981 mayday_clear_cpu(cpu, wq->mayday_mask);
1982
1983 /* migrate to the target cpu if possible */
1984 rescuer->gcwq = gcwq;
1985 worker_maybe_bind_and_lock(rescuer);
1986
1987 /*
1988 * Slurp in all works issued via this workqueue and
1989 * process'em.
1990 */
1991 BUG_ON(!list_empty(&rescuer->scheduled));
1992 list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1993 if (get_work_cwq(work) == cwq)
1994 move_linked_works(work, scheduled, &n);
1995
1996 process_scheduled_works(rescuer);
1997 spin_unlock_irq(&gcwq->lock);
1998 }
1999
2000 schedule();
2001 goto repeat;
459} 2002}
460 2003
461struct wq_barrier { 2004struct wq_barrier {
@@ -469,44 +2012,137 @@ static void wq_barrier_func(struct work_struct *work)
469 complete(&barr->done); 2012 complete(&barr->done);
470} 2013}
471 2014
2015/**
2016 * insert_wq_barrier - insert a barrier work
2017 * @cwq: cwq to insert barrier into
2018 * @barr: wq_barrier to insert
2019 * @target: target work to attach @barr to
2020 * @worker: worker currently executing @target, NULL if @target is not executing
2021 *
2022 * @barr is linked to @target such that @barr is completed only after
2023 * @target finishes execution. Please note that the ordering
2024 * guarantee is observed only with respect to @target and on the local
2025 * cpu.
2026 *
2027 * Currently, a queued barrier can't be canceled. This is because
2028 * try_to_grab_pending() can't determine whether the work to be
2029 * grabbed is at the head of the queue and thus can't clear the
2030 * LINKED flag of the previous work, while a work with the LINKED
2031 * flag set must be followed by a valid next work.
2032 *
2033 * Note that when @worker is non-NULL, @target may be modified
2034 * underneath us, so we can't reliably determine cwq from @target.
2035 *
2036 * CONTEXT:
2037 * spin_lock_irq(gcwq->lock).
2038 */
472static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 2039static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
473 struct wq_barrier *barr, struct list_head *head) 2040 struct wq_barrier *barr,
2041 struct work_struct *target, struct worker *worker)
474{ 2042{
2043 struct list_head *head;
2044 unsigned int linked = 0;
2045
475 /* 2046 /*
476 * debugobject calls are safe here even with cwq->lock locked 2047 * debugobject calls are safe here even with gcwq->lock locked
477 * as we know for sure that this will not trigger any of the 2048 * as we know for sure that this will not trigger any of the
478 * checks and call back into the fixup functions where we 2049 * checks and call back into the fixup functions where we
479 * might deadlock. 2050 * might deadlock.
480 */ 2051 */
481 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); 2052 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
482 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); 2053 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
483
484 init_completion(&barr->done); 2054 init_completion(&barr->done);
485 2055
2056 /*
2057 * If @target is currently being executed, schedule the
2058 * barrier to the worker; otherwise, put it after @target.
2059 */
2060 if (worker)
2061 head = worker->scheduled.next;
2062 else {
2063 unsigned long *bits = work_data_bits(target);
2064
2065 head = target->entry.next;
2066 /* there can already be other linked works, inherit and set */
2067 linked = *bits & WORK_STRUCT_LINKED;
2068 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2069 }
2070
486 debug_work_activate(&barr->work); 2071 debug_work_activate(&barr->work);
487 insert_work(cwq, &barr->work, head); 2072 insert_work(cwq, &barr->work, head,
2073 work_color_to_flags(WORK_NO_COLOR) | linked);
488} 2074}
489 2075
490static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) 2076/**
2077 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2078 * @wq: workqueue being flushed
2079 * @flush_color: new flush color, < 0 for no-op
2080 * @work_color: new work color, < 0 for no-op
2081 *
2082 * Prepare cwqs for workqueue flushing.
2083 *
2084 * If @flush_color is non-negative, flush_color on all cwqs should be
2085 * -1. If no cwq has in-flight commands at the specified color, all
2086 * cwq->flush_color fields stay at -1 and %false is returned. If any
2087 * cwq has in-flight commands, its cwq->flush_color is set to
2088 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2089 * wakeup logic is armed and %true is returned.
2090 *
2091 * The caller should have initialized @wq->first_flusher prior to
2092 * calling this function with non-negative @flush_color. If
2093 * @flush_color is negative, no flush color update is done and %false
2094 * is returned.
2095 *
2096 * If @work_color is non-negative, all cwqs should have the same
2097 * work_color which is previous to @work_color and all will be
2098 * advanced to @work_color.
2099 *
2100 * CONTEXT:
2101 * mutex_lock(wq->flush_mutex).
2102 *
2103 * RETURNS:
2104 * %true if @flush_color >= 0 and there's something to flush. %false
2105 * otherwise.
2106 */
2107static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2108 int flush_color, int work_color)
491{ 2109{
492 int active = 0; 2110 bool wait = false;
493 struct wq_barrier barr; 2111 unsigned int cpu;
494
495 WARN_ON(cwq->thread == current);
496 2112
497 spin_lock_irq(&cwq->lock); 2113 if (flush_color >= 0) {
498 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { 2114 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
499 insert_wq_barrier(cwq, &barr, &cwq->worklist); 2115 atomic_set(&wq->nr_cwqs_to_flush, 1);
500 active = 1;
501 } 2116 }
502 spin_unlock_irq(&cwq->lock);
503 2117
504 if (active) { 2118 for_each_cwq_cpu(cpu, wq) {
505 wait_for_completion(&barr.done); 2119 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
506 destroy_work_on_stack(&barr.work); 2120 struct global_cwq *gcwq = cwq->gcwq;
2121
2122 spin_lock_irq(&gcwq->lock);
2123
2124 if (flush_color >= 0) {
2125 BUG_ON(cwq->flush_color != -1);
2126
2127 if (cwq->nr_in_flight[flush_color]) {
2128 cwq->flush_color = flush_color;
2129 atomic_inc(&wq->nr_cwqs_to_flush);
2130 wait = true;
2131 }
2132 }
2133
2134 if (work_color >= 0) {
2135 BUG_ON(work_color != work_next_color(cwq->work_color));
2136 cwq->work_color = work_color;
2137 }
2138
2139 spin_unlock_irq(&gcwq->lock);
507 } 2140 }
508 2141
509 return active; 2142 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2143 complete(&wq->first_flusher->done);
2144
2145 return wait;
510} 2146}
511 2147
512/** 2148/**
@@ -518,20 +2154,150 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
518 * 2154 *
519 * We sleep until all works which were queued on entry have been handled, 2155 * We sleep until all works which were queued on entry have been handled,
520 * but we are not livelocked by new incoming ones. 2156 * but we are not livelocked by new incoming ones.
521 *
522 * This function used to run the workqueues itself. Now we just wait for the
523 * helper threads to do it.
524 */ 2157 */
525void flush_workqueue(struct workqueue_struct *wq) 2158void flush_workqueue(struct workqueue_struct *wq)
526{ 2159{
527 const struct cpumask *cpu_map = wq_cpu_map(wq); 2160 struct wq_flusher this_flusher = {
528 int cpu; 2161 .list = LIST_HEAD_INIT(this_flusher.list),
2162 .flush_color = -1,
2163 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2164 };
2165 int next_color;
529 2166
530 might_sleep();
531 lock_map_acquire(&wq->lockdep_map); 2167 lock_map_acquire(&wq->lockdep_map);
532 lock_map_release(&wq->lockdep_map); 2168 lock_map_release(&wq->lockdep_map);
533 for_each_cpu(cpu, cpu_map) 2169
534 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 2170 mutex_lock(&wq->flush_mutex);
2171
2172 /*
2173 * Start-to-wait phase
2174 */
2175 next_color = work_next_color(wq->work_color);
2176
2177 if (next_color != wq->flush_color) {
2178 /*
2179 * Color space is not full. The current work_color
2180 * becomes our flush_color and work_color is advanced
2181 * by one.
2182 */
2183 BUG_ON(!list_empty(&wq->flusher_overflow));
2184 this_flusher.flush_color = wq->work_color;
2185 wq->work_color = next_color;
2186
2187 if (!wq->first_flusher) {
2188 /* no flush in progress, become the first flusher */
2189 BUG_ON(wq->flush_color != this_flusher.flush_color);
2190
2191 wq->first_flusher = &this_flusher;
2192
2193 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2194 wq->work_color)) {
2195 /* nothing to flush, done */
2196 wq->flush_color = next_color;
2197 wq->first_flusher = NULL;
2198 goto out_unlock;
2199 }
2200 } else {
2201 /* wait in queue */
2202 BUG_ON(wq->flush_color == this_flusher.flush_color);
2203 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2204 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2205 }
2206 } else {
2207 /*
2208 * Oops, color space is full, wait on overflow queue.
2209 * The next flush completion will assign us
2210 * flush_color and transfer to flusher_queue.
2211 */
2212 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2213 }
2214
2215 mutex_unlock(&wq->flush_mutex);
2216
2217 wait_for_completion(&this_flusher.done);
2218
2219 /*
2220 * Wake-up-and-cascade phase
2221 *
2222 * First flushers are responsible for cascading flushes and
2223 * handling overflow. Non-first flushers can simply return.
2224 */
2225 if (wq->first_flusher != &this_flusher)
2226 return;
2227
2228 mutex_lock(&wq->flush_mutex);
2229
2230 /* we might have raced, check again with mutex held */
2231 if (wq->first_flusher != &this_flusher)
2232 goto out_unlock;
2233
2234 wq->first_flusher = NULL;
2235
2236 BUG_ON(!list_empty(&this_flusher.list));
2237 BUG_ON(wq->flush_color != this_flusher.flush_color);
2238
2239 while (true) {
2240 struct wq_flusher *next, *tmp;
2241
2242 /* complete all the flushers sharing the current flush color */
2243 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2244 if (next->flush_color != wq->flush_color)
2245 break;
2246 list_del_init(&next->list);
2247 complete(&next->done);
2248 }
2249
2250 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2251 wq->flush_color != work_next_color(wq->work_color));
2252
2253 /* this flush_color is finished, advance by one */
2254 wq->flush_color = work_next_color(wq->flush_color);
2255
2256 /* one color has been freed, handle overflow queue */
2257 if (!list_empty(&wq->flusher_overflow)) {
2258 /*
2259 * Assign the same color to all overflowed
2260 * flushers, advance work_color and append to
2261 * flusher_queue. This is the start-to-wait
2262 * phase for these overflowed flushers.
2263 */
2264 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2265 tmp->flush_color = wq->work_color;
2266
2267 wq->work_color = work_next_color(wq->work_color);
2268
2269 list_splice_tail_init(&wq->flusher_overflow,
2270 &wq->flusher_queue);
2271 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2272 }
2273
2274 if (list_empty(&wq->flusher_queue)) {
2275 BUG_ON(wq->flush_color != wq->work_color);
2276 break;
2277 }
2278
2279 /*
2280 * Need to flush more colors. Make the next flusher
2281 * the new first flusher and arm cwqs.
2282 */
2283 BUG_ON(wq->flush_color == wq->work_color);
2284 BUG_ON(wq->flush_color != next->flush_color);
2285
2286 list_del_init(&next->list);
2287 wq->first_flusher = next;
2288
2289 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2290 break;
2291
2292 /*
2293 * Meh... this color is already done, clear first
2294 * flusher and repeat cascading.
2295 */
2296 wq->first_flusher = NULL;
2297 }
2298
2299out_unlock:
2300 mutex_unlock(&wq->flush_mutex);
535} 2301}
536EXPORT_SYMBOL_GPL(flush_workqueue); 2302EXPORT_SYMBOL_GPL(flush_workqueue);
537 2303
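As a rough sketch of how the flushing above is used from a caller's side (illustrative only, not part of this diff; the workqueue, work item and callback names are made up):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* deferred processing runs here in process context */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_sync_point(struct workqueue_struct *my_wq)
{
        queue_work(my_wq, &my_work);

        /* sleeps until every work queued on my_wq up to now has finished */
        flush_workqueue(my_wq);
}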
@@ -547,43 +2313,46 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
547 */ 2313 */
548int flush_work(struct work_struct *work) 2314int flush_work(struct work_struct *work)
549{ 2315{
2316 struct worker *worker = NULL;
2317 struct global_cwq *gcwq;
550 struct cpu_workqueue_struct *cwq; 2318 struct cpu_workqueue_struct *cwq;
551 struct list_head *prev;
552 struct wq_barrier barr; 2319 struct wq_barrier barr;
553 2320
554 might_sleep(); 2321 might_sleep();
555 cwq = get_wq_data(work); 2322 gcwq = get_work_gcwq(work);
556 if (!cwq) 2323 if (!gcwq)
557 return 0; 2324 return 0;
558 2325
559 lock_map_acquire(&cwq->wq->lockdep_map); 2326 spin_lock_irq(&gcwq->lock);
560 lock_map_release(&cwq->wq->lockdep_map);
561
562 prev = NULL;
563 spin_lock_irq(&cwq->lock);
564 if (!list_empty(&work->entry)) { 2327 if (!list_empty(&work->entry)) {
565 /* 2328 /*
566 * See the comment near try_to_grab_pending()->smp_rmb(). 2329 * See the comment near try_to_grab_pending()->smp_rmb().
567 * If it was re-queued under us we are not going to wait. 2330 * If it was re-queued to a different gcwq under us, we
2331 * are not going to wait.
568 */ 2332 */
569 smp_rmb(); 2333 smp_rmb();
570 if (unlikely(cwq != get_wq_data(work))) 2334 cwq = get_work_cwq(work);
571 goto out; 2335 if (unlikely(!cwq || gcwq != cwq->gcwq))
572 prev = &work->entry; 2336 goto already_gone;
573 } else { 2337 } else {
574 if (cwq->current_work != work) 2338 worker = find_worker_executing_work(gcwq, work);
575 goto out; 2339 if (!worker)
576 prev = &cwq->worklist; 2340 goto already_gone;
2341 cwq = worker->current_cwq;
577 } 2342 }
578 insert_wq_barrier(cwq, &barr, prev->next); 2343
579out: 2344 insert_wq_barrier(cwq, &barr, work, worker);
580 spin_unlock_irq(&cwq->lock); 2345 spin_unlock_irq(&gcwq->lock);
581 if (!prev) 2346
582 return 0; 2347 lock_map_acquire(&cwq->wq->lockdep_map);
2348 lock_map_release(&cwq->wq->lockdep_map);
583 2349
584 wait_for_completion(&barr.done); 2350 wait_for_completion(&barr.done);
585 destroy_work_on_stack(&barr.work); 2351 destroy_work_on_stack(&barr.work);
586 return 1; 2352 return 1;
2353already_gone:
2354 spin_unlock_irq(&gcwq->lock);
2355 return 0;
587} 2356}
588EXPORT_SYMBOL_GPL(flush_work); 2357EXPORT_SYMBOL_GPL(flush_work);
589 2358
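By contrast, flush_work() waits for one specific item. A minimal sketch reusing the hypothetical my_work from the example above; the return value says whether there was anything to wait for:

        /* returns 1 after waiting, 0 if my_work was neither pending nor running */
        if (!flush_work(&my_work))
                pr_debug("my_work was already idle\n");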
@@ -593,54 +2362,55 @@ EXPORT_SYMBOL_GPL(flush_work);
593 */ 2362 */
594static int try_to_grab_pending(struct work_struct *work) 2363static int try_to_grab_pending(struct work_struct *work)
595{ 2364{
596 struct cpu_workqueue_struct *cwq; 2365 struct global_cwq *gcwq;
597 int ret = -1; 2366 int ret = -1;
598 2367
599 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) 2368 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
600 return 0; 2369 return 0;
601 2370
602 /* 2371 /*
603 * The queueing is in progress, or it is already queued. Try to 2372 * The queueing is in progress, or it is already queued. Try to
604 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 2373 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
605 */ 2374 */
606 2375 gcwq = get_work_gcwq(work);
607 cwq = get_wq_data(work); 2376 if (!gcwq)
608 if (!cwq)
609 return ret; 2377 return ret;
610 2378
611 spin_lock_irq(&cwq->lock); 2379 spin_lock_irq(&gcwq->lock);
612 if (!list_empty(&work->entry)) { 2380 if (!list_empty(&work->entry)) {
613 /* 2381 /*
614 * This work is queued, but perhaps we locked the wrong cwq. 2382 * This work is queued, but perhaps we locked the wrong gcwq.
615 * In that case we must see the new value after rmb(), see 2383 * In that case we must see the new value after rmb(), see
616 * insert_work()->wmb(). 2384 * insert_work()->wmb().
617 */ 2385 */
618 smp_rmb(); 2386 smp_rmb();
619 if (cwq == get_wq_data(work)) { 2387 if (gcwq == get_work_gcwq(work)) {
620 debug_work_deactivate(work); 2388 debug_work_deactivate(work);
621 list_del_init(&work->entry); 2389 list_del_init(&work->entry);
2390 cwq_dec_nr_in_flight(get_work_cwq(work),
2391 get_work_color(work));
622 ret = 1; 2392 ret = 1;
623 } 2393 }
624 } 2394 }
625 spin_unlock_irq(&cwq->lock); 2395 spin_unlock_irq(&gcwq->lock);
626 2396
627 return ret; 2397 return ret;
628} 2398}
629 2399
630static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, 2400static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
631 struct work_struct *work)
632{ 2401{
633 struct wq_barrier barr; 2402 struct wq_barrier barr;
634 int running = 0; 2403 struct worker *worker;
635 2404
636 spin_lock_irq(&cwq->lock); 2405 spin_lock_irq(&gcwq->lock);
637 if (unlikely(cwq->current_work == work)) {
638 insert_wq_barrier(cwq, &barr, cwq->worklist.next);
639 running = 1;
640 }
641 spin_unlock_irq(&cwq->lock);
642 2406
643 if (unlikely(running)) { 2407 worker = find_worker_executing_work(gcwq, work);
2408 if (unlikely(worker))
2409 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2410
2411 spin_unlock_irq(&gcwq->lock);
2412
2413 if (unlikely(worker)) {
644 wait_for_completion(&barr.done); 2414 wait_for_completion(&barr.done);
645 destroy_work_on_stack(&barr.work); 2415 destroy_work_on_stack(&barr.work);
646 } 2416 }
@@ -648,9 +2418,6 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
648 2418
649static void wait_on_work(struct work_struct *work) 2419static void wait_on_work(struct work_struct *work)
650{ 2420{
651 struct cpu_workqueue_struct *cwq;
652 struct workqueue_struct *wq;
653 const struct cpumask *cpu_map;
654 int cpu; 2421 int cpu;
655 2422
656 might_sleep(); 2423 might_sleep();
@@ -658,15 +2425,8 @@ static void wait_on_work(struct work_struct *work)
658 lock_map_acquire(&work->lockdep_map); 2425 lock_map_acquire(&work->lockdep_map);
659 lock_map_release(&work->lockdep_map); 2426 lock_map_release(&work->lockdep_map);
660 2427
661 cwq = get_wq_data(work); 2428 for_each_gcwq_cpu(cpu)
662 if (!cwq) 2429 wait_on_cpu_work(get_gcwq(cpu), work);
663 return;
664
665 wq = cwq->wq;
666 cpu_map = wq_cpu_map(wq);
667
668 for_each_cpu(cpu, cpu_map)
669 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
670} 2430}
671 2431
672static int __cancel_work_timer(struct work_struct *work, 2432static int __cancel_work_timer(struct work_struct *work,
@@ -681,7 +2441,7 @@ static int __cancel_work_timer(struct work_struct *work,
681 wait_on_work(work); 2441 wait_on_work(work);
682 } while (unlikely(ret < 0)); 2442 } while (unlikely(ret < 0));
683 2443
684 clear_wq_data(work); 2444 clear_work_data(work);
685 return ret; 2445 return ret;
686} 2446}
687 2447
@@ -727,8 +2487,6 @@ int cancel_delayed_work_sync(struct delayed_work *dwork)
727} 2487}
728EXPORT_SYMBOL(cancel_delayed_work_sync); 2488EXPORT_SYMBOL(cancel_delayed_work_sync);
729 2489
730static struct workqueue_struct *keventd_wq __read_mostly;
731
732/** 2490/**
733 * schedule_work - put work task in global workqueue 2491 * schedule_work - put work task in global workqueue
734 * @work: job to be done 2492 * @work: job to be done
@@ -742,7 +2500,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
742 */ 2500 */
743int schedule_work(struct work_struct *work) 2501int schedule_work(struct work_struct *work)
744{ 2502{
745 return queue_work(keventd_wq, work); 2503 return queue_work(system_wq, work);
746} 2504}
747EXPORT_SYMBOL(schedule_work); 2505EXPORT_SYMBOL(schedule_work);
748 2506
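For reference, typical use of schedule_work() pairs it with INIT_WORK() roughly as follows (a sketch only; struct my_device and the function names are invented):

#include <linux/workqueue.h>

struct my_device {
        struct work_struct irq_work;
        /* ... */
};

static void my_irq_work_fn(struct work_struct *work)
{
        struct my_device *dev = container_of(work, struct my_device, irq_work);

        /* runs later in process context via the system workqueue */
        (void)dev;
}

static void my_device_setup(struct my_device *dev)
{
        INIT_WORK(&dev->irq_work, my_irq_work_fn);
}

static void my_device_kick(struct my_device *dev)
{
        /* returns 0 if the work was already pending */
        schedule_work(&dev->irq_work);
}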
@@ -755,7 +2513,7 @@ EXPORT_SYMBOL(schedule_work);
755 */ 2513 */
756int schedule_work_on(int cpu, struct work_struct *work) 2514int schedule_work_on(int cpu, struct work_struct *work)
757{ 2515{
758 return queue_work_on(cpu, keventd_wq, work); 2516 return queue_work_on(cpu, system_wq, work);
759} 2517}
760EXPORT_SYMBOL(schedule_work_on); 2518EXPORT_SYMBOL(schedule_work_on);
761 2519
@@ -770,7 +2528,7 @@ EXPORT_SYMBOL(schedule_work_on);
770int schedule_delayed_work(struct delayed_work *dwork, 2528int schedule_delayed_work(struct delayed_work *dwork,
771 unsigned long delay) 2529 unsigned long delay)
772{ 2530{
773 return queue_delayed_work(keventd_wq, dwork, delay); 2531 return queue_delayed_work(system_wq, dwork, delay);
774} 2532}
775EXPORT_SYMBOL(schedule_delayed_work); 2533EXPORT_SYMBOL(schedule_delayed_work);
776 2534
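The delayed variant is typically used for self-rearming polls; a hedged sketch (the one-second period and all names are invented):

static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
        /* poll hardware, then re-arm roughly one second later */
        schedule_delayed_work(&my_poll, msecs_to_jiffies(1000));
}

static void my_poll_stop(void)
{
        /* stop the self-rearming poll and wait for a running instance */
        cancel_delayed_work_sync(&my_poll);
}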
@@ -783,9 +2541,8 @@ EXPORT_SYMBOL(schedule_delayed_work);
783void flush_delayed_work(struct delayed_work *dwork) 2541void flush_delayed_work(struct delayed_work *dwork)
784{ 2542{
785 if (del_timer_sync(&dwork->timer)) { 2543 if (del_timer_sync(&dwork->timer)) {
786 struct cpu_workqueue_struct *cwq; 2544 __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
787 cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu()); 2545 &dwork->work);
788 __queue_work(cwq, &dwork->work);
789 put_cpu(); 2546 put_cpu();
790 } 2547 }
791 flush_work(&dwork->work); 2548 flush_work(&dwork->work);
@@ -804,7 +2561,7 @@ EXPORT_SYMBOL(flush_delayed_work);
804int schedule_delayed_work_on(int cpu, 2561int schedule_delayed_work_on(int cpu,
805 struct delayed_work *dwork, unsigned long delay) 2562 struct delayed_work *dwork, unsigned long delay)
806{ 2563{
807 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); 2564 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
808} 2565}
809EXPORT_SYMBOL(schedule_delayed_work_on); 2566EXPORT_SYMBOL(schedule_delayed_work_on);
810 2567
@@ -820,8 +2577,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
820int schedule_on_each_cpu(work_func_t func) 2577int schedule_on_each_cpu(work_func_t func)
821{ 2578{
822 int cpu; 2579 int cpu;
823 int orig = -1; 2580 struct work_struct __percpu *works;
824 struct work_struct *works;
825 2581
826 works = alloc_percpu(struct work_struct); 2582 works = alloc_percpu(struct work_struct);
827 if (!works) 2583 if (!works)
@@ -829,23 +2585,12 @@ int schedule_on_each_cpu(work_func_t func)
829 2585
830 get_online_cpus(); 2586 get_online_cpus();
831 2587
832 /*
833 * When running in keventd don't schedule a work item on
834 * itself. Can just call directly because the work queue is
835 * already bound. This also is faster.
836 */
837 if (current_is_keventd())
838 orig = raw_smp_processor_id();
839
840 for_each_online_cpu(cpu) { 2588 for_each_online_cpu(cpu) {
841 struct work_struct *work = per_cpu_ptr(works, cpu); 2589 struct work_struct *work = per_cpu_ptr(works, cpu);
842 2590
843 INIT_WORK(work, func); 2591 INIT_WORK(work, func);
844 if (cpu != orig) 2592 schedule_work_on(cpu, work);
845 schedule_work_on(cpu, work);
846 } 2593 }
847 if (orig >= 0)
848 func(per_cpu_ptr(works, orig));
849 2594
850 for_each_online_cpu(cpu) 2595 for_each_online_cpu(cpu)
851 flush_work(per_cpu_ptr(works, cpu)); 2596 flush_work(per_cpu_ptr(works, cpu));
@@ -881,7 +2626,7 @@ int schedule_on_each_cpu(work_func_t func)
881 */ 2626 */
882void flush_scheduled_work(void) 2627void flush_scheduled_work(void)
883{ 2628{
884 flush_workqueue(keventd_wq); 2629 flush_workqueue(system_wq);
885} 2630}
886EXPORT_SYMBOL(flush_scheduled_work); 2631EXPORT_SYMBOL(flush_scheduled_work);
887 2632
@@ -913,170 +2658,170 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
913 2658
914int keventd_up(void) 2659int keventd_up(void)
915{ 2660{
916 return keventd_wq != NULL; 2661 return system_wq != NULL;
917} 2662}
918 2663
919int current_is_keventd(void) 2664static int alloc_cwqs(struct workqueue_struct *wq)
920{ 2665{
921 struct cpu_workqueue_struct *cwq; 2666 /*
922 int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ 2667 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
923 int ret = 0; 2668 * Make sure that the alignment isn't lower than that of
924 2669 * unsigned long long.
925 BUG_ON(!keventd_wq); 2670 */
2671 const size_t size = sizeof(struct cpu_workqueue_struct);
2672 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2673 __alignof__(unsigned long long));
2674#ifdef CONFIG_SMP
2675 bool percpu = !(wq->flags & WQ_UNBOUND);
2676#else
2677 bool percpu = false;
2678#endif
926 2679
927 cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); 2680 if (percpu)
928 if (current == cwq->thread) 2681 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
929 ret = 1; 2682 else {
2683 void *ptr;
930 2684
931 return ret; 2685 /*
2686 * Allocate enough room to align cwq and put an extra
2687 * pointer at the end pointing back to the originally
2688 * allocated pointer which will be used when freeing.
2689 */
2690 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2691 if (ptr) {
2692 wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2693 *(void **)(wq->cpu_wq.single + 1) = ptr;
2694 }
2695 }
932 2696
2697 /* just in case, make sure it's actually aligned */
2698 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2699 return wq->cpu_wq.v ? 0 : -ENOMEM;
933} 2700}
934 2701
935static struct cpu_workqueue_struct * 2702static void free_cwqs(struct workqueue_struct *wq)
936init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
937{ 2703{
938 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); 2704#ifdef CONFIG_SMP
939 2705 bool percpu = !(wq->flags & WQ_UNBOUND);
940 cwq->wq = wq; 2706#else
941 spin_lock_init(&cwq->lock); 2707 bool percpu = false;
942 INIT_LIST_HEAD(&cwq->worklist); 2708#endif
943 init_waitqueue_head(&cwq->more_work);
944 2709
945 return cwq; 2710 if (percpu)
2711 free_percpu(wq->cpu_wq.pcpu);
2712 else if (wq->cpu_wq.single) {
2713 /* the pointer to free is stored right after the cwq */
2714 kfree(*(void **)(wq->cpu_wq.single + 1));
2715 }
946} 2716}
947 2717
948static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) 2718static int wq_clamp_max_active(int max_active, unsigned int flags,
2719 const char *name)
949{ 2720{
950 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 2721 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
951 struct workqueue_struct *wq = cwq->wq;
952 const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
953 struct task_struct *p;
954 2722
955 p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); 2723 if (max_active < 1 || max_active > lim)
956 /* 2724 printk(KERN_WARNING "workqueue: max_active %d requested for %s "
957 * Nobody can add the work_struct to this cwq, 2725 "is out of range, clamping between %d and %d\n",
958 * if (caller is __create_workqueue) 2726 max_active, name, 1, lim);
959 * nobody should see this wq
960 * else // caller is CPU_UP_PREPARE
961 * cpu is not on cpu_online_map
962 * so we can abort safely.
963 */
964 if (IS_ERR(p))
965 return PTR_ERR(p);
966 if (cwq->wq->rt)
967 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
968 cwq->thread = p;
969
970 trace_workqueue_creation(cwq->thread, cpu);
971 2727
972 return 0; 2728 return clamp_val(max_active, 1, lim);
973} 2729}
974 2730
975static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) 2731struct workqueue_struct *__alloc_workqueue_key(const char *name,
2732 unsigned int flags,
2733 int max_active,
2734 struct lock_class_key *key,
2735 const char *lock_name)
976{ 2736{
977 struct task_struct *p = cwq->thread; 2737 struct workqueue_struct *wq;
2738 unsigned int cpu;
978 2739
979 if (p != NULL) { 2740 /*
980 if (cpu >= 0) 2741 * Unbound workqueues aren't concurrency managed and should be
981 kthread_bind(p, cpu); 2742 * dispatched to workers immediately.
982 wake_up_process(p); 2743 */
983 } 2744 if (flags & WQ_UNBOUND)
984} 2745 flags |= WQ_HIGHPRI;
985 2746
986struct workqueue_struct *__create_workqueue_key(const char *name, 2747 max_active = max_active ?: WQ_DFL_ACTIVE;
987 int singlethread, 2748 max_active = wq_clamp_max_active(max_active, flags, name);
988 int freezeable,
989 int rt,
990 struct lock_class_key *key,
991 const char *lock_name)
992{
993 struct workqueue_struct *wq;
994 struct cpu_workqueue_struct *cwq;
995 int err = 0, cpu;
996 2749
997 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 2750 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
998 if (!wq) 2751 if (!wq)
999 return NULL; 2752 goto err;
1000 2753
1001 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); 2754 wq->flags = flags;
1002 if (!wq->cpu_wq) { 2755 wq->saved_max_active = max_active;
1003 kfree(wq); 2756 mutex_init(&wq->flush_mutex);
1004 return NULL; 2757 atomic_set(&wq->nr_cwqs_to_flush, 0);
1005 } 2758 INIT_LIST_HEAD(&wq->flusher_queue);
2759 INIT_LIST_HEAD(&wq->flusher_overflow);
1006 2760
1007 wq->name = name; 2761 wq->name = name;
1008 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 2762 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1009 wq->singlethread = singlethread;
1010 wq->freezeable = freezeable;
1011 wq->rt = rt;
1012 INIT_LIST_HEAD(&wq->list); 2763 INIT_LIST_HEAD(&wq->list);
1013 2764
1014 if (singlethread) { 2765 if (alloc_cwqs(wq) < 0)
1015 cwq = init_cpu_workqueue(wq, singlethread_cpu); 2766 goto err;
1016 err = create_workqueue_thread(cwq, singlethread_cpu); 2767
1017 start_workqueue_thread(cwq, -1); 2768 for_each_cwq_cpu(cpu, wq) {
1018 } else { 2769 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1019 cpu_maps_update_begin(); 2770 struct global_cwq *gcwq = get_gcwq(cpu);
1020 /* 2771
1021 * We must place this wq on list even if the code below fails. 2772 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
1022 * cpu_down(cpu) can remove cpu from cpu_populated_map before 2773 cwq->gcwq = gcwq;
1023 * destroy_workqueue() takes the lock, in that case we leak 2774 cwq->wq = wq;
1024 * cwq[cpu]->thread. 2775 cwq->flush_color = -1;
1025 */ 2776 cwq->max_active = max_active;
1026 spin_lock(&workqueue_lock); 2777 INIT_LIST_HEAD(&cwq->delayed_works);
1027 list_add(&wq->list, &workqueues);
1028 spin_unlock(&workqueue_lock);
1029 /*
1030 * We must initialize cwqs for each possible cpu even if we
1031 * are going to call destroy_workqueue() finally. Otherwise
1032 * cpu_up() can hit the uninitialized cwq once we drop the
1033 * lock.
1034 */
1035 for_each_possible_cpu(cpu) {
1036 cwq = init_cpu_workqueue(wq, cpu);
1037 if (err || !cpu_online(cpu))
1038 continue;
1039 err = create_workqueue_thread(cwq, cpu);
1040 start_workqueue_thread(cwq, cpu);
1041 }
1042 cpu_maps_update_done();
1043 } 2778 }
1044 2779
1045 if (err) { 2780 if (flags & WQ_RESCUER) {
1046 destroy_workqueue(wq); 2781 struct worker *rescuer;
1047 wq = NULL; 2782
2783 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2784 goto err;
2785
2786 wq->rescuer = rescuer = alloc_worker();
2787 if (!rescuer)
2788 goto err;
2789
2790 rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2791 if (IS_ERR(rescuer->task))
2792 goto err;
2793
2794 wq->rescuer = rescuer;
2795 rescuer->task->flags |= PF_THREAD_BOUND;
2796 wake_up_process(rescuer->task);
1048 } 2797 }
1049 return wq;
1050}
1051EXPORT_SYMBOL_GPL(__create_workqueue_key);
1052 2798
1053static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
1054{
1055 /* 2799 /*
1056 * Our caller is either destroy_workqueue() or CPU_POST_DEAD, 2800 * workqueue_lock protects global freeze state and workqueues
1057 * cpu_add_remove_lock protects cwq->thread. 2801 * list. Grab it, set max_active accordingly and add the new
2802 * workqueue to workqueues list.
1058 */ 2803 */
1059 if (cwq->thread == NULL) 2804 spin_lock(&workqueue_lock);
1060 return;
1061 2805
1062 lock_map_acquire(&cwq->wq->lockdep_map); 2806 if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1063 lock_map_release(&cwq->wq->lockdep_map); 2807 for_each_cwq_cpu(cpu, wq)
2808 get_cwq(cpu, wq)->max_active = 0;
1064 2809
1065 flush_cpu_workqueue(cwq); 2810 list_add(&wq->list, &workqueues);
1066 /* 2811
1067 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, 2812 spin_unlock(&workqueue_lock);
1068 * a concurrent flush_workqueue() can insert a barrier after us. 2813
1069 * However, in that case run_workqueue() won't return and check 2814 return wq;
1070 * kthread_should_stop() until it flushes all work_struct's. 2815err:
1071 * When ->worklist becomes empty it is safe to exit because no 2816 if (wq) {
1072 * more work_structs can be queued on this cwq: flush_workqueue 2817 free_cwqs(wq);
1073 * checks list_empty(), and a "normal" queue_work() can't use 2818 free_mayday_mask(wq->mayday_mask);
1074 * a dead CPU. 2819 kfree(wq->rescuer);
1075 */ 2820 kfree(wq);
1076 trace_workqueue_destruction(cwq->thread); 2821 }
1077 kthread_stop(cwq->thread); 2822 return NULL;
1078 cwq->thread = NULL;
1079} 2823}
2824EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
1080 2825
1081/** 2826/**
1082 * destroy_workqueue - safely terminate a workqueue 2827 * destroy_workqueue - safely terminate a workqueue
@@ -1086,72 +2831,516 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
1086 */ 2831 */
1087void destroy_workqueue(struct workqueue_struct *wq) 2832void destroy_workqueue(struct workqueue_struct *wq)
1088{ 2833{
1089 const struct cpumask *cpu_map = wq_cpu_map(wq); 2834 unsigned int cpu;
1090 int cpu; 2835
2836 flush_workqueue(wq);
1091 2837
1092 cpu_maps_update_begin(); 2838 /*
2839 * The wq list is used to freeze wqs; remove from the list after
2840 * flushing is complete in case a freeze races us.
2841 */
1093 spin_lock(&workqueue_lock); 2842 spin_lock(&workqueue_lock);
1094 list_del(&wq->list); 2843 list_del(&wq->list);
1095 spin_unlock(&workqueue_lock); 2844 spin_unlock(&workqueue_lock);
1096 2845
1097 for_each_cpu(cpu, cpu_map) 2846 /* sanity check */
1098 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 2847 for_each_cwq_cpu(cpu, wq) {
1099 cpu_maps_update_done(); 2848 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2849 int i;
2850
2851 for (i = 0; i < WORK_NR_COLORS; i++)
2852 BUG_ON(cwq->nr_in_flight[i]);
2853 BUG_ON(cwq->nr_active);
2854 BUG_ON(!list_empty(&cwq->delayed_works));
2855 }
2856
2857 if (wq->flags & WQ_RESCUER) {
2858 kthread_stop(wq->rescuer->task);
2859 free_mayday_mask(wq->mayday_mask);
2860 }
1100 2861
1101 free_percpu(wq->cpu_wq); 2862 free_cwqs(wq);
1102 kfree(wq); 2863 kfree(wq);
1103} 2864}
1104EXPORT_SYMBOL_GPL(destroy_workqueue); 2865EXPORT_SYMBOL_GPL(destroy_workqueue);
1105 2866
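Putting the allocation and teardown paths together, a user of this interface would do roughly the following, assuming the alloc_workqueue() wrapper around __alloc_workqueue_key(); the workqueue name and flag choice are illustrative:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_init(void)
{
        /* freezeable, with a rescuer, default max_active */
        my_wq = alloc_workqueue("my_wq", WQ_FREEZEABLE | WQ_RESCUER, 0);
        if (!my_wq)
                return -ENOMEM;
        return 0;
}

static void my_exit(void)
{
        /* flushes remaining works, stops the rescuer and frees the cwqs */
        destroy_workqueue(my_wq);
}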
2867/**
2868 * workqueue_set_max_active - adjust max_active of a workqueue
2869 * @wq: target workqueue
2870 * @max_active: new max_active value.
2871 *
2872 * Set max_active of @wq to @max_active.
2873 *
2874 * CONTEXT:
2875 * Don't call from IRQ context.
2876 */
2877void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2878{
2879 unsigned int cpu;
2880
2881 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2882
2883 spin_lock(&workqueue_lock);
2884
2885 wq->saved_max_active = max_active;
2886
2887 for_each_cwq_cpu(cpu, wq) {
2888 struct global_cwq *gcwq = get_gcwq(cpu);
2889
2890 spin_lock_irq(&gcwq->lock);
2891
2892 if (!(wq->flags & WQ_FREEZEABLE) ||
2893 !(gcwq->flags & GCWQ_FREEZING))
2894 get_cwq(gcwq->cpu, wq)->max_active = max_active;
2895
2896 spin_unlock_irq(&gcwq->lock);
2897 }
2898
2899 spin_unlock(&workqueue_lock);
2900}
2901EXPORT_SYMBOL_GPL(workqueue_set_max_active);
2902
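An illustrative use of the knob above, reusing the hypothetical my_wq; on this branch max_active is applied per cpu workqueue:

        /* allow at most 4 works of my_wq to be active at once on each cpu */
        workqueue_set_max_active(my_wq, 4);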
2903/**
2904 * workqueue_congested - test whether a workqueue is congested
2905 * @cpu: CPU in question
2906 * @wq: target workqueue
2907 *
2908 * Test whether @wq's cpu workqueue for @cpu is congested. There is
2909 * no synchronization around this function and the test result is
2910 * unreliable and only useful as advisory hints or for debugging.
2911 *
2912 * RETURNS:
2913 * %true if congested, %false otherwise.
2914 */
2915bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2916{
2917 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2918
2919 return !list_empty(&cwq->delayed_works);
2920}
2921EXPORT_SYMBOL_GPL(workqueue_congested);
2922
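A sketch of the advisory congestion check, again with the hypothetical my_wq:

        /* purely advisory: another cpu may change the answer immediately */
        if (workqueue_congested(raw_smp_processor_id(), my_wq))
                pr_debug("my_wq has delayed works backed up on this cpu\n");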
2923/**
2924 * work_cpu - return the last known associated cpu for @work
2925 * @work: the work of interest
2926 *
2927 * RETURNS:
2928 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
2929 */
2930unsigned int work_cpu(struct work_struct *work)
2931{
2932 struct global_cwq *gcwq = get_work_gcwq(work);
2933
2934 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2935}
2936EXPORT_SYMBOL_GPL(work_cpu);
2937
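And a corresponding sketch for work_cpu(), with the hypothetical my_work:

        unsigned int cpu = work_cpu(&my_work);

        if (cpu == WORK_CPU_NONE)
                pr_debug("my_work has never been queued\n");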
2938/**
2939 * work_busy - test whether a work is currently pending or running
2940 * @work: the work to be tested
2941 *
2942 * Test whether @work is currently pending or running. There is no
2943 * synchronization around this function and the test result is
2944 * unreliable and only useful as advisory hints or for debugging.
2945 * Especially for reentrant wqs, the pending state might hide the
2946 * running state.
2947 *
2948 * RETURNS:
2949 * OR'd bitmask of WORK_BUSY_* bits.
2950 */
2951unsigned int work_busy(struct work_struct *work)
2952{
2953 struct global_cwq *gcwq = get_work_gcwq(work);
2954 unsigned long flags;
2955 unsigned int ret = 0;
2956
2957 if (!gcwq)
2958 return false;
2959
2960 spin_lock_irqsave(&gcwq->lock, flags);
2961
2962 if (work_pending(work))
2963 ret |= WORK_BUSY_PENDING;
2964 if (find_worker_executing_work(gcwq, work))
2965 ret |= WORK_BUSY_RUNNING;
2966
2967 spin_unlock_irqrestore(&gcwq->lock, flags);
2968
2969 return ret;
2970}
2971EXPORT_SYMBOL_GPL(work_busy);
2972
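A sketch of work_busy(), keeping in mind the comment above that the result is advisory only:

        unsigned int busy = work_busy(&my_work);

        if (busy & WORK_BUSY_PENDING)
                pr_debug("my_work is queued but not yet running\n");
        if (busy & WORK_BUSY_RUNNING)
                pr_debug("my_work is executing on some worker\n");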
2973/*
2974 * CPU hotplug.
2975 *
2976 * There are two challenges in supporting CPU hotplug. Firstly, there
2977 * are a lot of assumptions on strong associations among work, cwq and
2978 * gcwq which make migrating pending and scheduled works very
2979 * difficult to implement without impacting hot paths. Secondly,
2980 * gcwqs serve a mix of short, long and very long running works, making
2981 * blocked draining impractical.
2982 *
2983 * This is solved by allowing a gcwq to be detached from CPU, running
2984 * it with unbound (rogue) workers and allowing it to be reattached
2985 * later if the cpu comes back online. A separate thread is created
2986 * to govern a gcwq in such state and is called the trustee of the
2987 * gcwq.
2988 *
2989 * Trustee states and their descriptions.
2990 *
2991 * START Command state used on startup. On CPU_DOWN_PREPARE, a
2992 * new trustee is started with this state.
2993 *
2994 * IN_CHARGE Once started, trustee will enter this state after
2995 * assuming the manager role and making all existing
2996 * workers rogue. DOWN_PREPARE waits for trustee to
2997 * enter this state. After reaching IN_CHARGE, trustee
2998 * tries to execute the pending worklist until it's empty
2999 * and the state is set to BUTCHER, or the state is set
3000 * to RELEASE.
3001 *
3002 * BUTCHER Command state which is set by the cpu callback after
3003 * the cpu has gone down. Once this state is set, the trustee
3004 * knows that there will be no new works on the worklist
3005 * and once the worklist is empty it can proceed to
3006 * killing idle workers.
3007 *
3008 * RELEASE Command state which is set by the cpu callback if the
3009 * cpu down has been canceled or it has come online
3010 * again. After recognizing this state, trustee stops
3011 * trying to drain or butcher and clears ROGUE, rebinds
3012 * all remaining workers back to the cpu and releases
3013 * the manager role.
3014 *
3015 * DONE Trustee will enter this state after BUTCHER or RELEASE
3016 * is complete.
3017 *
3018 * trustee CPU draining
3019 * took over down complete
3020 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3021 * | | ^
3022 * | CPU is back online v return workers |
3023 * ----------------> RELEASE --------------
3024 */
3025
3026/**
3027 * trustee_wait_event_timeout - timed event wait for trustee
3028 * @cond: condition to wait for
3029 * @timeout: timeout in jiffies
3030 *
3031 * wait_event_timeout() for trustee to use. Handles locking and
3032 * checks for RELEASE request.
3033 *
3034 * CONTEXT:
3035 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3036 * multiple times. To be used by trustee.
3037 *
3038 * RETURNS:
3039 * Positive indicating left time if @cond is satisfied, 0 if timed
3040 * out, -1 if canceled.
3041 */
3042#define trustee_wait_event_timeout(cond, timeout) ({ \
3043 long __ret = (timeout); \
3044 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3045 __ret) { \
3046 spin_unlock_irq(&gcwq->lock); \
3047 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
3048 (gcwq->trustee_state == TRUSTEE_RELEASE), \
3049 __ret); \
3050 spin_lock_irq(&gcwq->lock); \
3051 } \
3052 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
3053})
3054
3055/**
3056 * trustee_wait_event - event wait for trustee
3057 * @cond: condition to wait for
3058 *
3059 * wait_event() for trustee to use. Automatically handles locking and
3060 * checks for CANCEL request.
3061 *
3062 * CONTEXT:
3063 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3064 * multiple times. To be used by trustee.
3065 *
3066 * RETURNS:
3067 * 0 if @cond is satisfied, -1 if canceled.
3068 */
3069#define trustee_wait_event(cond) ({ \
3070 long __ret1; \
3071 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3072 __ret1 < 0 ? -1 : 0; \
3073})
3074
3075static int __cpuinit trustee_thread(void *__gcwq)
3076{
3077 struct global_cwq *gcwq = __gcwq;
3078 struct worker *worker;
3079 struct work_struct *work;
3080 struct hlist_node *pos;
3081 long rc;
3082 int i;
3083
3084 BUG_ON(gcwq->cpu != smp_processor_id());
3085
3086 spin_lock_irq(&gcwq->lock);
3087 /*
3088 * Claim the manager position and make all workers rogue.
3089 * Trustee must be bound to the target cpu and can't be
3090 * cancelled.
3091 */
3092 BUG_ON(gcwq->cpu != smp_processor_id());
3093 rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3094 BUG_ON(rc < 0);
3095
3096 gcwq->flags |= GCWQ_MANAGING_WORKERS;
3097
3098 list_for_each_entry(worker, &gcwq->idle_list, entry)
3099 worker->flags |= WORKER_ROGUE;
3100
3101 for_each_busy_worker(worker, i, pos, gcwq)
3102 worker->flags |= WORKER_ROGUE;
3103
3104 /*
3105 * Call schedule() so that we cross rq->lock and thus can
3106 * guarantee sched callbacks see the rogue flag. This is
3107 * necessary as scheduler callbacks may be invoked from other
3108 * cpus.
3109 */
3110 spin_unlock_irq(&gcwq->lock);
3111 schedule();
3112 spin_lock_irq(&gcwq->lock);
3113
3114 /*
3115 * Sched callbacks are disabled now. Zap nr_running. After
3116 * this, nr_running stays zero and need_more_worker() and
3117 * keep_working() are always true as long as the worklist is
3118 * not empty.
3119 */
3120 atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3121
3122 spin_unlock_irq(&gcwq->lock);
3123 del_timer_sync(&gcwq->idle_timer);
3124 spin_lock_irq(&gcwq->lock);
3125
3126 /*
3127 * We're now in charge. Notify and proceed to drain. We need
3128 * to keep the gcwq running during the whole CPU down
3129 * procedure as other cpu hotunplug callbacks may need to
3130 * flush currently running tasks.
3131 */
3132 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3133 wake_up_all(&gcwq->trustee_wait);
3134
3135 /*
3136 * The original cpu is in the process of dying and may go away
3137 * anytime now. When that happens, we and all workers would
3138 * be migrated to other cpus. Try to drain any remaining work. We
3139 * want to get it over with ASAP - spam rescuers, wake up as
3140 * many idlers as necessary and create new ones till the
3141 * worklist is empty. Note that if the gcwq is frozen, there
3142 * may be frozen works in freezeable cwqs. Don't declare
3143 * completion while frozen.
3144 */
3145 while (gcwq->nr_workers != gcwq->nr_idle ||
3146 gcwq->flags & GCWQ_FREEZING ||
3147 gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3148 int nr_works = 0;
3149
3150 list_for_each_entry(work, &gcwq->worklist, entry) {
3151 send_mayday(work);
3152 nr_works++;
3153 }
3154
3155 list_for_each_entry(worker, &gcwq->idle_list, entry) {
3156 if (!nr_works--)
3157 break;
3158 wake_up_process(worker->task);
3159 }
3160
3161 if (need_to_create_worker(gcwq)) {
3162 spin_unlock_irq(&gcwq->lock);
3163 worker = create_worker(gcwq, false);
3164 spin_lock_irq(&gcwq->lock);
3165 if (worker) {
3166 worker->flags |= WORKER_ROGUE;
3167 start_worker(worker);
3168 }
3169 }
3170
3171 /* give a breather */
3172 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3173 break;
3174 }
3175
3176 /*
3177 * Either all works have been scheduled and cpu is down, or
3178 * cpu down has already been canceled. Wait for and butcher
3179 * all workers till we're canceled.
3180 */
3181 do {
3182 rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3183 while (!list_empty(&gcwq->idle_list))
3184 destroy_worker(list_first_entry(&gcwq->idle_list,
3185 struct worker, entry));
3186 } while (gcwq->nr_workers && rc >= 0);
3187
3188 /*
3189 * At this point, either draining has completed and no worker
3190 * is left, or cpu down has been canceled or the cpu is being
3191 * brought back up. There shouldn't be any idle one left.
3192 * Tell the remaining busy ones to rebind once they finish their
3193 * currently scheduled works by scheduling the rebind_work.
3194 */
3195 WARN_ON(!list_empty(&gcwq->idle_list));
3196
3197 for_each_busy_worker(worker, i, pos, gcwq) {
3198 struct work_struct *rebind_work = &worker->rebind_work;
3199
3200 /*
3201 * Rebind_work may race with future cpu hotplug
3202 * operations. Use a separate flag to mark that
3203 * rebinding is scheduled.
3204 */
3205 worker->flags |= WORKER_REBIND;
3206 worker->flags &= ~WORKER_ROGUE;
3207
3208 /* queue rebind_work, wq doesn't matter, use the default one */
3209 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3210 work_data_bits(rebind_work)))
3211 continue;
3212
3213 debug_work_activate(rebind_work);
3214 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3215 worker->scheduled.next,
3216 work_color_to_flags(WORK_NO_COLOR));
3217 }
3218
3219 /* relinquish manager role */
3220 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3221
3222 /* notify completion */
3223 gcwq->trustee = NULL;
3224 gcwq->trustee_state = TRUSTEE_DONE;
3225 wake_up_all(&gcwq->trustee_wait);
3226 spin_unlock_irq(&gcwq->lock);
3227 return 0;
3228}
3229
3230/**
3231 * wait_trustee_state - wait for trustee to enter the specified state
3232 * @gcwq: gcwq the trustee of interest belongs to
3233 * @state: target state to wait for
3234 *
3235 * Wait for the trustee to reach @state. DONE is already matched.
3236 *
3237 * CONTEXT:
3238 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3239 * multiple times. To be used by cpu_callback.
3240 */
3241static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3242{
3243 if (!(gcwq->trustee_state == state ||
3244 gcwq->trustee_state == TRUSTEE_DONE)) {
3245 spin_unlock_irq(&gcwq->lock);
3246 __wait_event(gcwq->trustee_wait,
3247 gcwq->trustee_state == state ||
3248 gcwq->trustee_state == TRUSTEE_DONE);
3249 spin_lock_irq(&gcwq->lock);
3250 }
3251}
3252
1106static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, 3253static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1107 unsigned long action, 3254 unsigned long action,
1108 void *hcpu) 3255 void *hcpu)
1109{ 3256{
1110 unsigned int cpu = (unsigned long)hcpu; 3257 unsigned int cpu = (unsigned long)hcpu;
1111 struct cpu_workqueue_struct *cwq; 3258 struct global_cwq *gcwq = get_gcwq(cpu);
1112 struct workqueue_struct *wq; 3259 struct task_struct *new_trustee = NULL;
1113 int err = 0; 3260 struct worker *uninitialized_var(new_worker);
3261 unsigned long flags;
1114 3262
1115 action &= ~CPU_TASKS_FROZEN; 3263 action &= ~CPU_TASKS_FROZEN;
1116 3264
1117 switch (action) { 3265 switch (action) {
3266 case CPU_DOWN_PREPARE:
3267 new_trustee = kthread_create(trustee_thread, gcwq,
3268 "workqueue_trustee/%d\n", cpu);
3269 if (IS_ERR(new_trustee))
3270 return notifier_from_errno(PTR_ERR(new_trustee));
3271 kthread_bind(new_trustee, cpu);
3272 /* fall through */
1118 case CPU_UP_PREPARE: 3273 case CPU_UP_PREPARE:
1119 cpumask_set_cpu(cpu, cpu_populated_map); 3274 BUG_ON(gcwq->first_idle);
1120 } 3275 new_worker = create_worker(gcwq, false);
1121undo: 3276 if (!new_worker) {
1122 list_for_each_entry(wq, &workqueues, list) { 3277 if (new_trustee)
1123 cwq = per_cpu_ptr(wq->cpu_wq, cpu); 3278 kthread_stop(new_trustee);
1124 3279 return NOTIFY_BAD;
1125 switch (action) {
1126 case CPU_UP_PREPARE:
1127 err = create_workqueue_thread(cwq, cpu);
1128 if (!err)
1129 break;
1130 printk(KERN_ERR "workqueue [%s] for %i failed\n",
1131 wq->name, cpu);
1132 action = CPU_UP_CANCELED;
1133 err = -ENOMEM;
1134 goto undo;
1135
1136 case CPU_ONLINE:
1137 start_workqueue_thread(cwq, cpu);
1138 break;
1139
1140 case CPU_UP_CANCELED:
1141 start_workqueue_thread(cwq, -1);
1142 case CPU_POST_DEAD:
1143 cleanup_workqueue_thread(cwq);
1144 break;
1145 } 3280 }
1146 } 3281 }
1147 3282
3283 /* some are called w/ irq disabled, don't disturb irq status */
3284 spin_lock_irqsave(&gcwq->lock, flags);
3285
1148 switch (action) { 3286 switch (action) {
1149 case CPU_UP_CANCELED: 3287 case CPU_DOWN_PREPARE:
3288 /* initialize trustee and tell it to acquire the gcwq */
3289 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3290 gcwq->trustee = new_trustee;
3291 gcwq->trustee_state = TRUSTEE_START;
3292 wake_up_process(gcwq->trustee);
3293 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3294 /* fall through */
3295 case CPU_UP_PREPARE:
3296 BUG_ON(gcwq->first_idle);
3297 gcwq->first_idle = new_worker;
3298 break;
3299
3300 case CPU_DYING:
3301 /*
3302 * Before this, the trustee and all workers except for
3303 * the ones which are still executing works from
3304 * before the last CPU down must be on the cpu. After
3305 * this, they'll all be diasporas.
3306 */
3307 gcwq->flags |= GCWQ_DISASSOCIATED;
3308 break;
3309
1150 case CPU_POST_DEAD: 3310 case CPU_POST_DEAD:
1151 cpumask_clear_cpu(cpu, cpu_populated_map); 3311 gcwq->trustee_state = TRUSTEE_BUTCHER;
3312 /* fall through */
3313 case CPU_UP_CANCELED:
3314 destroy_worker(gcwq->first_idle);
3315 gcwq->first_idle = NULL;
3316 break;
3317
3318 case CPU_DOWN_FAILED:
3319 case CPU_ONLINE:
3320 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3321 if (gcwq->trustee_state != TRUSTEE_DONE) {
3322 gcwq->trustee_state = TRUSTEE_RELEASE;
3323 wake_up_process(gcwq->trustee);
3324 wait_trustee_state(gcwq, TRUSTEE_DONE);
3325 }
3326
3327 /*
3328 * Trustee is done and there might be no worker left.
3329 * Put the first_idle in and request a real manager to
3330 * take a look.
3331 */
3332 spin_unlock_irq(&gcwq->lock);
3333 kthread_bind(gcwq->first_idle->task, cpu);
3334 spin_lock_irq(&gcwq->lock);
3335 gcwq->flags |= GCWQ_MANAGE_WORKERS;
3336 start_worker(gcwq->first_idle);
3337 gcwq->first_idle = NULL;
3338 break;
1152 } 3339 }
1153 3340
1154 return notifier_from_errno(err); 3341 spin_unlock_irqrestore(&gcwq->lock, flags);
3342
3343 return notifier_from_errno(0);
1155} 3344}
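The callback above follows the standard CPU hotplug notifier pattern: mask off CPU_TASKS_FROZEN, act on the prepare/online/down phases, and report failure through notifier_from_errno(). A generic sketch of that pattern for an unrelated subsystem (my_cpu_callback, my_hotplug_init and the printouts are illustrative, not part of this patch):

	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {	/* ignore suspend variants */
		case CPU_UP_PREPARE:
			/* allocate per-cpu state; an error here aborts the bringup */
			pr_info("preparing cpu %u\n", cpu);
			break;
		case CPU_ONLINE:
		case CPU_DOWN_FAILED:
			pr_info("cpu %u is (still) online\n", cpu);
			break;
		case CPU_DOWN_PREPARE:
			/* cpu is about to go away, stop relying on it */
			pr_info("cpu %u is going down\n", cpu);
			break;
		}
		return notifier_from_errno(0);
	}

	static int __init my_hotplug_init(void)
	{
		hotcpu_notifier(my_cpu_callback, 0);
		return 0;
	}
	early_initcall(my_hotplug_init);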
1156 3345
1157#ifdef CONFIG_SMP 3346#ifdef CONFIG_SMP
@@ -1201,14 +3390,199 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
1201EXPORT_SYMBOL_GPL(work_on_cpu); 3390EXPORT_SYMBOL_GPL(work_on_cpu);
1202#endif /* CONFIG_SMP */ 3391#endif /* CONFIG_SMP */
1203 3392
1204void __init init_workqueues(void) 3393#ifdef CONFIG_FREEZER
3394
3395/**
3396 * freeze_workqueues_begin - begin freezing workqueues
3397 *
3398 * Start freezing workqueues. After this function returns, all
3399 * freezeable workqueues will queue new works to their delayed_works
3400 * list instead of gcwq->worklist.
3401 *
3402 * CONTEXT:
3403 * Grabs and releases workqueue_lock and gcwq->lock's.
3404 */
3405void freeze_workqueues_begin(void)
1205{ 3406{
1206 alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); 3407 unsigned int cpu;
1207 3408
1208 cpumask_copy(cpu_populated_map, cpu_online_mask); 3409 spin_lock(&workqueue_lock);
1209 singlethread_cpu = cpumask_first(cpu_possible_mask); 3410
1210 cpu_singlethread_map = cpumask_of(singlethread_cpu); 3411 BUG_ON(workqueue_freezing);
1211 hotcpu_notifier(workqueue_cpu_callback, 0); 3412 workqueue_freezing = true;
1212 keventd_wq = create_workqueue("events"); 3413
1213 BUG_ON(!keventd_wq); 3414 for_each_gcwq_cpu(cpu) {
3415 struct global_cwq *gcwq = get_gcwq(cpu);
3416 struct workqueue_struct *wq;
3417
3418 spin_lock_irq(&gcwq->lock);
3419
3420 BUG_ON(gcwq->flags & GCWQ_FREEZING);
3421 gcwq->flags |= GCWQ_FREEZING;
3422
3423 list_for_each_entry(wq, &workqueues, list) {
3424 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3425
3426 if (cwq && wq->flags & WQ_FREEZEABLE)
3427 cwq->max_active = 0;
3428 }
3429
3430 spin_unlock_irq(&gcwq->lock);
3431 }
3432
3433 spin_unlock(&workqueue_lock);
3434}
3435
3436/**
3437 * freeze_workqueues_busy - are freezeable workqueues still busy?
3438 *
3439 * Check whether freezing is complete. This function must be called
3440 * between freeze_workqueues_begin() and thaw_workqueues().
3441 *
3442 * CONTEXT:
3443 * Grabs and releases workqueue_lock.
3444 *
3445 * RETURNS:
3446 * %true if some freezeable workqueues are still busy. %false if
3447 * freezing is complete.
3448 */
3449bool freeze_workqueues_busy(void)
3450{
3451 unsigned int cpu;
3452 bool busy = false;
3453
3454 spin_lock(&workqueue_lock);
3455
3456 BUG_ON(!workqueue_freezing);
3457
3458 for_each_gcwq_cpu(cpu) {
3459 struct workqueue_struct *wq;
3460 /*
3461 * nr_active is monotonically decreasing. It's safe
3462 * to peek without lock.
3463 */
3464 list_for_each_entry(wq, &workqueues, list) {
3465 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3466
3467 if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3468 continue;
3469
3470 BUG_ON(cwq->nr_active < 0);
3471 if (cwq->nr_active) {
3472 busy = true;
3473 goto out_unlock;
3474 }
3475 }
3476 }
3477out_unlock:
3478 spin_unlock(&workqueue_lock);
3479 return busy;
3480}
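These entry points are meant to be driven by the suspend/hibernation core rather than by individual drivers. A simplified sketch of the intended call sequence (suspend_sketch and the 10ms poll interval are illustrative, not the actual kernel/power code):

	#include <linux/delay.h>
	#include <linux/workqueue.h>

	static void suspend_sketch(void)
	{
		/* stop freezeable workqueues from starting new works */
		freeze_workqueues_begin();

		/* wait for works that were already executing to drain */
		while (freeze_workqueues_busy())
			msleep(10);

		/* ... suspend or write the hibernation image here ... */

		/* restore max_active and release the collected works */
		thaw_workqueues();
	}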
3481
3482/**
3483 * thaw_workqueues - thaw workqueues
3484 *
3485 * Thaw workqueues. Normal queueing is restored and all collected
3486 * frozen works are transferred to their respective gcwq worklists.
3487 *
3488 * CONTEXT:
3489 * Grabs and releases workqueue_lock and gcwq->lock's.
3490 */
3491void thaw_workqueues(void)
3492{
3493 unsigned int cpu;
3494
3495 spin_lock(&workqueue_lock);
3496
3497 if (!workqueue_freezing)
3498 goto out_unlock;
3499
3500 for_each_gcwq_cpu(cpu) {
3501 struct global_cwq *gcwq = get_gcwq(cpu);
3502 struct workqueue_struct *wq;
3503
3504 spin_lock_irq(&gcwq->lock);
3505
3506 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3507 gcwq->flags &= ~GCWQ_FREEZING;
3508
3509 list_for_each_entry(wq, &workqueues, list) {
3510 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3511
3512 if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3513 continue;
3514
3515 /* restore max_active and repopulate worklist */
3516 cwq->max_active = wq->saved_max_active;
3517
3518 while (!list_empty(&cwq->delayed_works) &&
3519 cwq->nr_active < cwq->max_active)
3520 cwq_activate_first_delayed(cwq);
3521 }
3522
3523 wake_up_worker(gcwq);
3524
3525 spin_unlock_irq(&gcwq->lock);
3526 }
3527
3528 workqueue_freezing = false;
3529out_unlock:
3530 spin_unlock(&workqueue_lock);
3531}
3532#endif /* CONFIG_FREEZER */
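From a driver's point of view the freezer support is opt-in via WQ_FREEZEABLE: works queued on such a workqueue while frozen sit on the cwq's delayed list until thaw_workqueues() runs. A minimal module sketch (my_wq, my_work_fn and the module boilerplate are illustrative, not from this patch):

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static void my_work_fn(struct work_struct *work)
	{
		/* runs from a gcwq worker; will not start while my_wq is frozen */
		pr_info("my_work_fn ran\n");
	}
	static DECLARE_WORK(my_work, my_work_fn);

	static int __init my_init(void)
	{
		/* WQ_FREEZEABLE: freeze_workqueues_begin() drops max_active to 0,
		 * so works queued during suspend stay delayed until thaw */
		my_wq = alloc_workqueue("my_wq", WQ_FREEZEABLE, 0);
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);
		return 0;
	}

	static void __exit my_exit(void)
	{
		flush_workqueue(my_wq);
		destroy_workqueue(my_wq);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");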
3533
3534static int __init init_workqueues(void)
3535{
3536 unsigned int cpu;
3537 int i;
3538
3539 cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3540
3541 /* initialize gcwqs */
3542 for_each_gcwq_cpu(cpu) {
3543 struct global_cwq *gcwq = get_gcwq(cpu);
3544
3545 spin_lock_init(&gcwq->lock);
3546 INIT_LIST_HEAD(&gcwq->worklist);
3547 gcwq->cpu = cpu;
3548 if (cpu == WORK_CPU_UNBOUND)
3549 gcwq->flags |= GCWQ_DISASSOCIATED;
3550
3551 INIT_LIST_HEAD(&gcwq->idle_list);
3552 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3553 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3554
3555 init_timer_deferrable(&gcwq->idle_timer);
3556 gcwq->idle_timer.function = idle_worker_timeout;
3557 gcwq->idle_timer.data = (unsigned long)gcwq;
3558
3559 setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3560 (unsigned long)gcwq);
3561
3562 ida_init(&gcwq->worker_ida);
3563
3564 gcwq->trustee_state = TRUSTEE_DONE;
3565 init_waitqueue_head(&gcwq->trustee_wait);
3566 }
3567
3568 /* create the initial worker */
3569 for_each_online_gcwq_cpu(cpu) {
3570 struct global_cwq *gcwq = get_gcwq(cpu);
3571 struct worker *worker;
3572
3573 worker = create_worker(gcwq, true);
3574 BUG_ON(!worker);
3575 spin_lock_irq(&gcwq->lock);
3576 start_worker(worker);
3577 spin_unlock_irq(&gcwq->lock);
3578 }
3579
3580 system_wq = alloc_workqueue("events", 0, 0);
3581 system_long_wq = alloc_workqueue("events_long", 0, 0);
3582 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3583 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3584 WQ_UNBOUND_MAX_ACTIVE);
3585 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || !system_unbound_wq);
3586 return 0;
1214} 3587}
3588early_initcall(init_workqueues);
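init_workqueues() is also where the new system-wide workqueues are created. A short usage sketch (do_light_work and do_heavy_work are hypothetical): short items go to the per-cpu system_wq, while long-running, CPU-agnostic items fit system_unbound_wq, which is served by unbound workers and capped at WQ_UNBOUND_MAX_ACTIVE.

	#include <linux/workqueue.h>

	static void do_light_work(struct work_struct *work)
	{
		/* short item: the per-cpu system_wq is fine */
	}
	static DECLARE_WORK(light_work, do_light_work);

	static void do_heavy_work(struct work_struct *work)
	{
		/* long-running item: system_unbound_wq won't tie up a per-cpu worker */
	}
	static DECLARE_WORK(heavy_work, do_heavy_work);

	static void kick_work(void)
	{
		queue_work(system_wq, &light_work);	/* equivalent to schedule_work() */
		queue_work(system_unbound_wq, &heavy_work);
	}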