Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	144
1 files changed, 94 insertions, 50 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..c5257316f4b9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@ struct cpu_workqueue_struct {
 	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
+
+	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -80,6 +85,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+	unsigned long new, old, res;
+
+	/* assume the pending flag is already set and that the task has already
+	 * been queued on this workqueue */
+	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	res = work->management;
+	if (res != new) {
+		do {
+			old = res;
+			new = (unsigned long) wq;
+			new |= (old & WORK_STRUCT_FLAG_MASK);
+			res = cmpxchg(&work->management, old, new);
+		} while (res != old);
+	}
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
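Review note: set_wq_data() folds the owning workqueue pointer and the WORK_STRUCT_* flag bits into the single work->management word, retrying with cmpxchg() so a concurrent flag update is never lost; get_wq_data() masks the flags back off. A minimal user-space sketch of that pack-and-CAS idea, assuming GCC's __sync_val_compare_and_swap() as a stand-in for the kernel's cmpxchg() (names below are illustrative, not from this patch):

/* Sketch: a pointer and a few low flag bits share one word, updated by CAS. */
#define FLAG_MASK	0x3UL			/* assumed flag area in the low bits */

static void set_data_keep_flags(unsigned long *word, void *ptr)
{
	unsigned long old, new, res = *word;

	do {
		old = res;
		new = (unsigned long)ptr | (old & FLAG_MASK);
		res = __sync_val_compare_and_swap(word, old, new);
	} while (res != old);
}

static void *get_data(unsigned long word)
{
	return (void *)(word & ~FLAG_MASK);
}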
@@ -87,7 +115,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	work->wq_data = cwq;
+	set_wq_data(work, cwq);
 	list_add_tail(&work->entry, &cwq->worklist);
 	cwq->insert_sequence++;
 	wake_up(&cwq->more_work);
@@ -108,7 +136,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -122,38 +150,42 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = get_wq_data(&dwork->work);
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
+
+	if (delay == 0)
+		return queue_work(wq, work);
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
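With queue_delayed_work() taking a struct delayed_work, the timer now travels inside the delayable work item instead of inside every work_struct. A hedged usage sketch, assuming the DECLARE_DELAYED_WORK() helper added by the matching linux/workqueue.h change (not visible in this file):

static void my_timeout_fn(struct work_struct *work)
{
	/* handle the timeout; the work pointer itself is the context handle */
}

static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);

static void arm_timeout(void)
{
	/* a delay of 0 now short-circuits straight to queue_work() */
	schedule_delayed_work(&my_dwork, HZ);
}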
@@ -172,19 +204,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
@@ -212,15 +245,26 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
-		void (*f) (void *) = work->func;
-		void *data = work->data;
+		work_func_t f = work->func;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		BUG_ON(work->wq_data != cwq);
-		clear_bit(0, &work->pending);
-		f(data);
+		BUG_ON(get_wq_data(work) != cwq);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+					"%s/0x%08x/%d\n",
+					current->comm, preempt_count(),
+					current->pid);
+			printk(KERN_ERR "    last function: ");
+			print_symbol("%s\n", (unsigned long)f);
+			debug_show_held_locks(current);
+			dump_stack();
+		}
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
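run_workqueue() no longer carries a separate data pointer: the callback is a work_func_t and receives the work_struct itself, so per-item state is reached by embedding the work item and using container_of(). The new in_atomic()/lockdep check then flags handlers that return while still holding a lock or a preempt count. A sketch of the expected handler conversion (the structure and function names are illustrative):

struct my_device {
	struct work_struct	reset_work;
	int			irq;
};

static void my_reset_handler(struct work_struct *work)
{
	/* recover the enclosing object instead of receiving a void *data */
	struct my_device *dev = container_of(work, struct my_device, reset_work);

	disable_irq(dev->irq);
	/* ... any lock taken here must be dropped before returning, or the
	 * leak check added above will print this handler's symbol ... */
	enable_irq(dev->irq);
}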
@@ -237,7 +281,8 @@ static int worker_thread(void *__cwq)
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	current->flags |= PF_NOFREEZE;
+	if (!cwq->freezeable)
+		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
 
@@ -260,6 +305,9 @@ static int worker_thread(void *__cwq)
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		if (cwq->freezeable)
+			try_to_freeze();
+
 		add_wait_queue(&cwq->more_work, &wait);
 		if (list_empty(&cwq->worklist))
 			schedule();
@@ -336,7 +384,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+						   int cpu, int freezeable)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -346,6 +394,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 	cwq->thread = NULL;
 	cwq->insert_sequence = 0;
 	cwq->remove_sequence = 0;
+	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
@@ -361,7 +410,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread)
+					    int singlethread, int freezeable)
 {
 	int cpu, destroy = 0;
 	struct workqueue_struct *wq;
@@ -381,7 +430,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu);
+		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
 		if (!p)
 			destroy = 1;
 		else
@@ -389,7 +438,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu);
+			p = create_workqueue_thread(wq, cpu, freezeable);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
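The freezeable flag threads down from __create_workqueue() to every worker thread; callers normally reach it through wrapper macros rather than calling it directly. A hedged sketch of creating a freezeable queue, assuming a create_freezeable_workqueue() wrapper in the matching linux/workqueue.h change (the wrapper and its exact flag values are an assumption here):

static struct workqueue_struct *my_wq;

static int my_init(void)
{
	/* workers keep freezing enabled and call try_to_freeze() on suspend */
	my_wq = create_freezeable_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_exit(void)
{
	destroy_workqueue(my_wq);
}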
@@ -468,38 +517,37 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -508,7 +556,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -519,7 +567,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
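schedule_on_each_cpu() loses its info argument along with the two-argument INIT_WORK(), so a per-CPU callback that needs state has to fetch it itself (per-CPU data, a global, or the container_of() trick above). A small hedged example of a caller after this change (names are illustrative):

static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_counter(struct work_struct *unused)
{
	/* runs once on every online CPU from keventd */
	__get_cpu_var(my_counter)++;
}

static int bump_all(void)
{
	return schedule_on_each_cpu(bump_counter);
}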
@@ -539,12 +587,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  * work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
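The cancel-then-flush loop only terminates because a rearming handler re-queues onto the same workqueue that is being flushed. A hedged sketch of the self-rearming pattern this helper is meant to stop cleanly (my_wq and the other names are illustrative):

static struct workqueue_struct *my_wq;

static void poll_hw(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);

	/* ... poll the hardware ... */
	queue_delayed_work(my_wq, dwork, HZ);		/* rearm */
}

static DECLARE_DELAYED_WORK(poll_work, poll_hw);

static void stop_polling(void)
{
	/* keep cancelling until a flush has drained any in-flight rearm */
	cancel_rearming_delayed_workqueue(my_wq, &poll_work);
}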
@@ -552,18 +600,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  * work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:		the function to execute
- * @data:	data to pass to the function
  * @ew:		guaranteed storage for the execute work structure (must
  *		be available when the work executes)
  *
@@ -573,15 +620,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:	0 - function was executed
  *		1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-			       struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 
 	return 1;
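execute_in_process_context() also switches to work_func_t, so the embedded work_struct in the caller-supplied execute_work doubles as the context handle. A hedged usage sketch (structure names are illustrative):

struct my_release {
	struct execute_work	ew;	/* must stay valid until the work runs */
	struct my_device	*dev;
};

static void my_release_fn(struct work_struct *work)
{
	struct execute_work *ew = container_of(work, struct execute_work, work);
	struct my_release *rel = container_of(ew, struct my_release, ew);

	kfree(rel->dev);
	kfree(rel);
}

static void my_release_later(struct my_release *rel)
{
	/* runs fn immediately in process context, otherwise defers to keventd */
	execute_in_process_context(my_release_fn, &rel->ew);
}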
@@ -609,7 +655,6 @@ int current_is_keventd(void)
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -642,7 +687,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu)) {
+			if (!create_workqueue_thread(wq, hotcpu, 0)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}
@@ -692,7 +737,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {