| author | Andrew Morton <akpm@osdl.org> | 2007-05-09 05:33:53 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:51 -0400 |
| commit | edab2516a6c1752e8e5e3d55727cabf12346e5df | |
| tree | 235f0e8dd26341a650cc3329e4fc54b3b857aa07 | |
| parent | b89deed32ccc96098bd6bc953c64bba6b847774f | |
flush_workqueue(): use preempt_disable to hold off cpu hotplug
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
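The patch swaps the workqueue_mutex critical sections in the flush path for preempt_disable()/preempt_enable() pairs: with the stop_machine()-based CPU hotplug of this era, a CPU cannot be unplugged while any task is running with preemption disabled, so the per-cpu workqueue data stays valid for the duration of the non-preemptible region. Below is a minimal, out-of-tree sketch of that general pattern, not code from this patch; my_percpu_counter and touch_all_online_cpus() are hypothetical names.

```c
/*
 * Sketch only: per-cpu data is walked under preempt_disable(), which
 * holds off stop_machine()-based CPU hotplug for the duration of the
 * loop.  my_percpu_counter and touch_all_online_cpus() are made-up
 * names, not part of the patch above.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(int, my_percpu_counter);

static void touch_all_online_cpus(void)
{
	int cpu;

	preempt_disable();		/* CPU hotplug cannot complete now */
	for_each_online_cpu(cpu)
		per_cpu(my_percpu_counter, cpu)++;
	preempt_enable();		/* hotplug may proceed again */
}
```

The cost of this scheme, visible in the first hunk of the diff, is that nothing may sleep while preemption is off, so the flush path has to re-enable preemption around run_workqueue() and around the wait_for_completion() call.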
 kernel/workqueue.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 918d55267a12..5176d51bcc2a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -419,18 +419,22 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		 * Probably keventd trying to flush its own queue. So simply run
 		 * it by hand rather than deadlocking.
 		 */
-		mutex_unlock(&workqueue_mutex);
+		preempt_enable();
+		/*
+		 * We can still touch *cwq here because we are keventd, and
+		 * hot-unplug will be waiting us to exit.
+		 */
 		run_workqueue(cwq);
-		mutex_lock(&workqueue_mutex);
+		preempt_disable();
 	} else {
 		struct wq_barrier barr;
 
 		init_wq_barrier(&barr);
 		__queue_work(cwq, &barr.work);
 
-		mutex_unlock(&workqueue_mutex);
+		preempt_enable();	/* Can no longer touch *cwq */
 		wait_for_completion(&barr.done);
-		mutex_lock(&workqueue_mutex);
+		preempt_disable();
 	}
 }
 
@@ -449,7 +453,7 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	mutex_lock(&workqueue_mutex);
+	preempt_disable();		/* CPU hotplug */
 	if (is_single_threaded(wq)) {
 		/* Always use first cpu's area. */
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
@@ -459,7 +463,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 		for_each_online_cpu(cpu)
 			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 	}
-	mutex_unlock(&workqueue_mutex);
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
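The else branch of the first hunk leans on the existing wq_barrier mechanism: a dummy work item whose handler fires a completion, queued behind all pending work so the flusher can sleep until everything ahead of it has run. Below is a self-contained sketch of the same idea against the public workqueue API (schedule_work() rather than the internal __queue_work()); struct flush_barrier and wait_for_pending_work() are hypothetical names, not the kernel's own wq_barrier code.

```c
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>

/* Hypothetical stand-in for the kernel's internal struct wq_barrier. */
struct flush_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void flush_barrier_func(struct work_struct *work)
{
	struct flush_barrier *barr = container_of(work, struct flush_barrier, work);

	complete(&barr->done);		/* all earlier work items have run */
}

/* Queue a barrier behind pending work and sleep until it executes. */
static void wait_for_pending_work(void)
{
	struct flush_barrier barr;

	INIT_WORK(&barr.work, flush_barrier_func);
	init_completion(&barr.done);

	schedule_work(&barr.work);
	wait_for_completion(&barr.done);	/* sleeps: preemption must be enabled */
}
```

As the new comment in the hunk notes, preemption must be re-enabled before wait_for_completion() because the caller is about to sleep, and from that point on *cwq may no longer be dereferenced.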
