Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  108
1 file changed, 100 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8d1e7cb8a51a..6b186750e9be 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@ struct cpu_workqueue_struct {
 	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
+
+	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -103,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cwq->lock, flags);
+	/*
+	 * We need to re-validate the work info after we've gotten
+	 * the cpu_workqueue lock. We can run the work now iff:
+	 *
+	 *  - the wq_data still matches the cpu_workqueue_struct
+	 *  - AND the work is still marked pending
+	 *  - AND the work is still on a list (which will be this
+	 *    workqueue_struct list)
+	 *
+	 * All these conditions are important, because we
+	 * need to protect against the work being run right
+	 * now on another CPU (all but the last one might be
+	 * true if it's currently running and has not been
+	 * released yet, for example).
+	 */
+	if (get_wq_data(work) == cwq
+	    && work_pending(work)
+	    && !list_empty(&work->entry)) {
+		work_func_t f = work->func;
+		list_del_init(&work->entry);
+		spin_unlock_irqrestore(&cwq->lock, flags);
+
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		spin_lock_irqsave(&cwq->lock, flags);
+		cwq->remove_sequence++;
+		wake_up(&cwq->work_done);
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&cwq->lock, flags);
+	return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+	for (;;) {
+		struct cpu_workqueue_struct *cwq;
+
+		if (!work_pending(work))
+			return 0;
+		if (list_empty(&work->entry))
+			return 0;
+		/* NOTE! This depends intimately on __queue_work! */
+		cwq = get_wq_data(work);
+		if (!cwq)
+			return 0;
+		if (__run_work(cwq, work))
+			return 1;
+	}
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
@@ -250,6 +328,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			work_release(work);
 		f(work);
 
+		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+					"%s/0x%08x/%d\n",
+					current->comm, preempt_count(),
+					current->pid);
+			printk(KERN_ERR "    last function: ");
+			print_symbol("%s\n", (unsigned long)f);
+			debug_show_held_locks(current);
+			dump_stack();
+		}
+
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
 		wake_up(&cwq->work_done);
@@ -265,7 +354,8 @@ static int worker_thread(void *__cwq)
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	current->flags |= PF_NOFREEZE;
+	if (!cwq->freezeable)
+		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
 
@@ -288,6 +378,9 @@ static int worker_thread(void *__cwq)
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		if (cwq->freezeable)
+			try_to_freeze();
+
 		add_wait_queue(&cwq->more_work, &wait);
 		if (list_empty(&cwq->worklist))
 			schedule();
@@ -364,7 +457,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+						   int cpu, int freezeable)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -374,6 +467,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 	cwq->thread = NULL;
 	cwq->insert_sequence = 0;
 	cwq->remove_sequence = 0;
+	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
@@ -389,7 +483,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread)
+					    int singlethread, int freezeable)
 {
 	int cpu, destroy = 0;
 	struct workqueue_struct *wq;
@@ -409,7 +503,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu);
+		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
 		if (!p)
 			destroy = 1;
 		else
@@ -417,7 +511,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu);
+			p = create_workqueue_thread(wq, cpu, freezeable);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
@@ -634,7 +728,6 @@ int current_is_keventd(void)
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -667,7 +760,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu)) {
+			if (!create_workqueue_thread(wq, hotcpu, 0)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}
@@ -717,7 +810,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {
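
For context, here is a minimal sketch (not part of the patch) of how a caller might use the run_scheduled_work() interface added above. The work item, handler, and calling function are hypothetical, and it assumes the matching declaration of run_scheduled_work() in <linux/workqueue.h> from the rest of this change, plus the 2.6.20-era work_struct API in which handlers take a struct work_struct pointer:

/* Hypothetical example; names are illustrative, not from this patch. */
#include <linux/workqueue.h>
#include <linux/kernel.h>

static void example_handler(struct work_struct *work)
{
	/* ... the deferred processing ... */
}

static DECLARE_WORK(example_work, example_handler);

static void example_cannot_wait_path(void)
{
	/* Queue the work on the shared keventd workqueue as usual. */
	schedule_work(&example_work);

	/*
	 * Later, on a path that cannot wait for keventd: if the work is
	 * still pending and still queued, run it synchronously here.
	 * run_scheduled_work() returns 1 if it ran the work, 0 if there
	 * was nothing left to run.  As the kernel-doc above notes, this
	 * must not be used for delayed work.
	 */
	if (!run_scheduled_work(&example_work))
		pr_debug("example_work already ran or was never queued\n");
}

The freezeable side of the patch is only reachable through __create_workqueue(name, singlethread, freezeable); passing freezeable as 1 makes the per-CPU worker threads skip PF_NOFREEZE and call try_to_freeze() in their loop, so they park during suspend.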