author		Oleg Nesterov <oleg@tv-sign.ru>	2007-05-09 05:34:07 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:52 -0400
commit		d721304dce0ced0b3b0366996cc02929669708a8 (patch)
tree		a7c41390d082e4d2c70b185e508368293cf68b92
parent		319c2a986eb45989690c955d9667b814ef0ed56f (diff)
workqueue: fix flush_workqueue() vs CPU_DEAD race
Many thanks to Srivatsa Vaddagiri for the helpful discussion and for spotting
the bug in my previous attempt.
work->func() (and thus flush_workqueue()) must not use workqueue_mutex;
taking it leads to deadlock when CPU_DEAD does kthread_stop(). However,
without this mutex held we can't detect CPU_DEAD in progress, which can
move pending works to another CPU while the dead one is no longer on
cpu_online_map.
Change flush_workqueue() to use for_each_possible_cpu(). This means that
flush_cpu_workqueue() may hit a CPU which is already dead. However, in that
case

	!list_empty(&cwq->worklist) || cwq->current_work != NULL

means that CPU_DEAD is still in progress: it will do kthread_stop() +
take_over_work(), so we can proceed and insert a barrier. We hold
cwq->lock, so we are safe.
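
For reference, the check above lives in flush_cpu_workqueue(); at this point
in the series its non-keventd branch looks roughly like the sketch below
(simplified; the keventd self-flush case is omitted):

	struct wq_barrier barr;
	int active = 0;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		/* the CPU may already be gone from cpu_online_map, but
		   CPU_DEAD has not finished take_over_work() yet, so the
		   barrier is either executed by the still-running worker
		   or migrated to a live CPU and completed there */
		insert_wq_barrier(cwq, &barr, 1);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active)
		wait_for_completion(&barr.done);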
Also, add a migrate_sequence counter, incremented by take_over_work() under
cwq->lock. If take_over_work() happened before we checked this CPU, we are
guaranteed to see the new value after spin_unlock().
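
Condensed from the hunks below, the two halves of this handshake are:

	/* CPU_DEAD side, in take_over_work(), under cwq->lock: */
	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);
	migrate_sequence++;
	/* ... requeue every entry of `list` on a live CPU ... */
	spin_unlock_irq(&cwq->lock);

	/* flush side, in flush_workqueue(): */
again:
	sequence = migrate_sequence;
	for_each_possible_cpu(cpu)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
	if (unlikely(sequence != migrate_sequence))
		goto again;	/* work was moved behind our back: rescan */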
Further possible changes: remove CPU_DEAD handling (along with
take_over_work() and migrate_sequence) from workqueue.c entirely. CPU_DEAD
would just set a cwq->please_exit_after_flush flag;
CPU_UP_PREPARE->create_workqueue_thread() would clear this flag and create a
new thread if cwq->thread == NULL. This way the workqueue/cpu-hotplug
interaction shrinks to almost nothing: workqueue_mutex only protects the
"workqueues" list, and CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE go away.
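
A rough illustration of that proposal, in CPU-notifier form (hypothetical:
please_exit_after_flush does not exist in the tree, this is only the shape
of the idea):

	case CPU_UP_PREPARE:
		/* clear the flag and re-create the worker if it has exited */
		cwq->please_exit_after_flush = 0;
		if (cwq->thread == NULL)
			p = create_workqueue_thread(wq, hotcpu);
		break;

	case CPU_DEAD:
		/* no kthread_stop(), no take_over_work(): just ask the
		   worker to drain its worklist and then exit on its own */
		cwq->please_exit_after_flush = 1;
		break;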
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: "Pallipadi, Venkatesh" <venkatesh.pallipadi@intel.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	kernel/workqueue.c	44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d80dbdceadb8..1d1933cf3778 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -64,6 +64,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
+static long migrate_sequence __read_mostly;
 static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
@@ -421,13 +422,7 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		 * Probably keventd trying to flush its own queue. So simply run
 		 * it by hand rather than deadlocking.
 		 */
-		preempt_enable();
-		/*
-		 * We can still touch *cwq here because we are keventd, and
-		 * hot-unplug will be waiting us to exit.
-		 */
 		run_workqueue(cwq);
-		preempt_disable();
 	} else {
 		struct wq_barrier barr;
 		int active = 0;
@@ -439,11 +434,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		}
 		spin_unlock_irq(&cwq->lock);
 
-		if (active) {
-			preempt_enable();
+		if (active)
 			wait_for_completion(&barr.done);
-			preempt_disable();
-		}
 	}
 }
 
@@ -462,17 +454,21 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	preempt_disable();		/* CPU hotplug */
 	if (is_single_threaded(wq)) {
 		/* Always use first cpu's area. */
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	} else {
+		long sequence;
 		int cpu;
+again:
+		sequence = migrate_sequence;
 
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
+
+		if (unlikely(sequence != migrate_sequence))
+			goto again;
 	}
-	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
@@ -544,17 +540,21 @@ out:
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
-static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+static void init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	struct task_struct *p;
 
-	spin_lock_init(&cwq->lock);
 	cwq->wq = wq;
-	cwq->thread = NULL;
+	spin_lock_init(&cwq->lock);
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
+}
+
+static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
+						   int cpu)
+{
+	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+	struct task_struct *p;
 
 	if (is_single_threaded(wq))
 		p = kthread_create(worker_thread, cwq, "%s", wq->name);
@@ -589,6 +589,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
+		init_cpu_workqueue(wq, singlethread_cpu);
 		p = create_workqueue_thread(wq, singlethread_cpu);
 		if (!p)
 			destroy = 1;
@@ -596,7 +597,11 @@ struct workqueue_struct *__create_workqueue(const char *name,
 			wake_up_process(p);
 	} else {
 		list_add(&wq->list, &workqueues);
-		for_each_online_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
+			init_cpu_workqueue(wq, cpu);
+			if (!cpu_online(cpu))
+				continue;
+
 			p = create_workqueue_thread(wq, cpu);
 			if (p) {
 				kthread_bind(p, cpu);
@@ -831,6 +836,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 
 	spin_lock_irq(&cwq->lock);
 	list_replace_init(&cwq->worklist, &list);
+	migrate_sequence++;
 
 	while (!list_empty(&list)) {
 		printk("Taking work for %s\n", wq->name);