author     Lai Jiangshan <laijs@cn.fujitsu.com>            2009-04-02 19:58:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-04-02 22:05:00 -0400
commit     2355b70fd59cb5be7de2052a9edeee7afb7ff099 (patch)
tree       93f192f9dab4748a313ec8d78041b1937e232f33 /kernel/workqueue.c
parent     1ee1184485df9c9a3503d3a684b911fb7c73d259 (diff)
workqueue: avoid recursion in run_workqueue()
1) lockdep will complain when run_workqueue() performs recursion.
2) The recursive implementation of run_workqueue() means that
flush_workqueue() and its documentation are inconsistent. This may
hide deadlocks and other bugs.
3) The recursion in run_workqueue() will poison cwq->current_work, but
   flush_work() and __cancel_work_timer(), etcetera, need a reliable
   cwq->current_work (a toy model of this clobbering follows below).
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
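[Editor's note] Problem (3) is easiest to see with a toy model. The sketch
below is hypothetical userspace C, not kernel code: run_workqueue(), cwq and
current_work merely mirror names from kernel/workqueue.c, while the fixed-size
pending array and the outer/inner work items are invented for illustration.
It shows how the old "flush your own queue by hand" path re-enters
run_workqueue() from inside a running work item, so the inner pass overwrites
and finally NULLs cwq.current_work while the outer item is still executing.

#include <stdio.h>

struct work_struct;
typedef void (*work_func_t)(struct work_struct *);

struct work_struct {
	work_func_t func;
	const char *name;
};

/* Toy stand-in for struct cpu_workqueue_struct: a current-work record
 * plus a trivial fixed-size queue (invented for this model). */
static struct {
	struct work_struct *current_work;
	struct work_struct *pending[4];
	int head, tail;
} cwq;

static void run_workqueue(void)
{
	while (cwq.head != cwq.tail) {
		struct work_struct *work = cwq.pending[cwq.head++];

		cwq.current_work = work;	/* record the running item */
		work->func(work);		/* may recurse via a flush */
		cwq.current_work = NULL;	/* inner recursion ends here,
						 * clobbering the outer record */
	}
}

static void inner(struct work_struct *unused)
{
	printf("inner sees current_work = %s\n", cwq.current_work->name);
}

static void outer(struct work_struct *unused)
{
	run_workqueue();	/* pre-patch "flush own queue by hand" path */
	printf("outer after flush: current_work = %p, but 'outer' is still running\n",
	       (void *)cwq.current_work);
}

int main(void)
{
	static struct work_struct w_outer = { outer, "outer" };
	static struct work_struct w_inner = { inner, "inner" };

	cwq.pending[cwq.tail++] = &w_outer;
	cwq.pending[cwq.tail++] = &w_inner;
	run_workqueue();
	return 0;
}

After the inner pass finishes, the model prints a NULL current_work even
though 'outer' has not returned; flush_work() or __cancel_work_timer() probing
that field at this point would draw the wrong conclusion.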
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 41 +++++++++++------------------------------
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9aedd9fd825b..32f8e0d2bf5a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
@@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
+	int active = 0;
+	struct wq_barrier barr;
 
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	WARN_ON(cwq->thread == current);
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
+	}
+	spin_unlock_irq(&cwq->lock);
 
-		if (active)
-			wait_for_completion(&barr.done);
-	}
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }
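[Editor's note] For context on the mechanism that flush_cpu_workqueue() now
relies on unconditionally: a wq_barrier is an ordinary work item whose
callback fires a completion, so the flusher queued behind the pending work
sleeps in wait_for_completion() until the worker thread has executed
everything ahead of the barrier. The sketch below paraphrases the wq_barrier
definitions from kernel/workqueue.c of this era; it is not part of this patch,
and the comments are the editor's.

/* Paraphrased sketch of the barrier the flush path waits on. */
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	/* Runs on the workqueue thread only after everything queued
	 * ahead of the barrier has executed; wakes the flusher that is
	 * blocked in wait_for_completion(&barr->done). */
	complete(&barr->done);
}

Because the barrier executes on the workqueue thread itself, a thread that
flushed its own queue would wait forever for its own completion; that is why
the self-flush case, formerly "run it by hand", is now simply a WARN_ON().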