author    Oleg Nesterov <oleg@tv-sign.ru>  2007-05-09 05:34:10 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 15:30:52 -0400
commit    f293ea92007419e4f9c52db0cf57af17f45b9f94 (patch)
tree      829d06499c1d9004ca530e5f23de43df27d3baa4 /kernel
parent    7097a87afe937a5879528d52880c2d95f089e96c (diff)
workqueue: don't save interrupts in run_workqueue()
work->func() may sleep, so it is a bug to call run_workqueue() with irqs disabled.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
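For context: spin_lock_irqsave() records the caller's interrupt state in flags so that spin_unlock_irqrestore() can put it back, which matters only when a lock can be taken from contexts where interrupts are already disabled. run_workqueue() can never run in such a context, because the work->func() it invokes may sleep, so the plain _irq variants (unconditionally disable interrupts on lock, unconditionally re-enable on unlock) are sufficient. A minimal sketch of the two patterns, with a made-up lock and made-up caller names; illustrative kernel-style C, not part of the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration */

/* Callable from contexts where irqs may already be off: must save/restore. */
static void from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* save irq state, disable irqs */
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);	/* restore the saved state */
}

/*
 * Caller guaranteed to run with irqs enabled (e.g. because it may sleep,
 * like run_workqueue()): the save/restore is dead weight, plain _irq is enough.
 */
static void irqs_known_enabled(void)
{
	spin_lock_irq(&example_lock);	/* disable irqs */
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);	/* unconditionally re-enable irqs */
}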
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  18
1 file changed, 8 insertions, 10 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ea422254f8bf..74f3f7825229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -227,13 +227,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	unsigned long flags;
-
-	/*
-	 * Keep taking off work from the queue until
-	 * done.
-	 */
-	spin_lock_irqsave(&cwq->lock, flags);
+	spin_lock_irq(&cwq->lock);
 	cwq->run_depth++;
 	if (cwq->run_depth > 3) {
 		/* morton gets to eat his hat */
@@ -248,7 +242,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
-		spin_unlock_irqrestore(&cwq->lock, flags);
+		spin_unlock_irq(&cwq->lock);
 
 		BUG_ON(get_wq_data(work) != cwq);
 		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
@@ -266,11 +260,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			dump_stack();
 		}
 
-		spin_lock_irqsave(&cwq->lock, flags);
+		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
 	cwq->run_depth--;
-	spin_unlock_irqrestore(&cwq->lock, flags);
+	spin_unlock_irq(&cwq->lock);
 }
 
 /*
@@ -399,6 +393,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
+	might_sleep();
+
 	if (is_single_threaded(wq))
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	else {
@@ -445,6 +441,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
 
+	might_sleep();
+
 	cwq = get_wq_data(work);
 	/* Was it ever queued ? */
 	if (!cwq)
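The two might_sleep() additions make the same invariant checkable at the flush entry points: flush_workqueue() and flush_work() may block, so they must not be called with a spinlock held or with irqs disabled. With sleep-in-atomic debugging enabled, a bad caller now gets a "sleeping function called from invalid context" warning immediately rather than a rare deadlock later. A hedged sketch of the kind of caller this catches, with hypothetical lock and workqueue names, illustrative only:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(state_lock);		/* hypothetical */
static struct workqueue_struct *example_wq;	/* assumed created elsewhere */

static void broken_caller(void)
{
	spin_lock_irq(&state_lock);	/* atomic context: irqs off, lock held */
	/*
	 * BUG: flush_workqueue() may sleep. The might_sleep() annotation
	 * added by this patch makes a debug kernel warn right here, instead
	 * of the bug surfacing only when the flush actually has to wait.
	 */
	flush_workqueue(example_wq);
	spin_unlock_irq(&state_lock);
}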