diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-05-09 05:34:15 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:52 -0400 |
commit | 06ba38a9a0f6ceffe70343f684c5a690e3710ef4 (patch) | |
tree | 500f4d8a3be212ececa80ead44d9658c8b7a4ce1 /kernel | |
parent | c12920d19078eb8fd99560ec232a6e05c6ff1aa8 (diff) |
workqueues: shift kthread_bind() from CPU_UP_PREPARE to CPU_ONLINE
CPU_UP_PREPARE binds cwq->thread to the new CPU. So CPU_UP_CANCELED tries to
wake up the task which is bound to the failed CPU.
With this patch we don't bind cwq->thread until CPU becomes online. The first
wake_up() after kthread_create() is a bit special, make a simple helper for
that.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 23 |
1 file changed, 15 insertions, 8 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e858e93886e3..7d1ebfc1a995 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -668,15 +668,21 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | |||
668 | 668 | ||
669 | cwq->thread = p; | 669 | cwq->thread = p; |
670 | cwq->should_stop = 0; | 670 | cwq->should_stop = 0; |
671 | if (!is_single_threaded(wq)) | ||
672 | kthread_bind(p, cpu); | ||
673 | |||
674 | if (is_single_threaded(wq) || cpu_online(cpu)) | ||
675 | wake_up_process(p); | ||
676 | 671 | ||
677 | return 0; | 672 | return 0; |
678 | } | 673 | } |
679 | 674 | ||
675 | static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | ||
676 | { | ||
677 | struct task_struct *p = cwq->thread; | ||
678 | |||
679 | if (p != NULL) { | ||
680 | if (cpu >= 0) | ||
681 | kthread_bind(p, cpu); | ||
682 | wake_up_process(p); | ||
683 | } | ||
684 | } | ||
685 | |||
680 | struct workqueue_struct *__create_workqueue(const char *name, | 686 | struct workqueue_struct *__create_workqueue(const char *name, |
681 | int singlethread, int freezeable) | 687 | int singlethread, int freezeable) |
682 | { | 688 | { |
@@ -702,6 +708,7 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
702 | if (singlethread) { | 708 | if (singlethread) { |
703 | cwq = init_cpu_workqueue(wq, singlethread_cpu); | 709 | cwq = init_cpu_workqueue(wq, singlethread_cpu); |
704 | err = create_workqueue_thread(cwq, singlethread_cpu); | 710 | err = create_workqueue_thread(cwq, singlethread_cpu); |
711 | start_workqueue_thread(cwq, -1); | ||
705 | } else { | 712 | } else { |
706 | mutex_lock(&workqueue_mutex); | 713 | mutex_lock(&workqueue_mutex); |
707 | list_add(&wq->list, &workqueues); | 714 | list_add(&wq->list, &workqueues); |
@@ -711,6 +718,7 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
711 | if (err || !cpu_online(cpu)) | 718 | if (err || !cpu_online(cpu)) |
712 | continue; | 719 | continue; |
713 | err = create_workqueue_thread(cwq, cpu); | 720 | err = create_workqueue_thread(cwq, cpu); |
721 | start_workqueue_thread(cwq, cpu); | ||
714 | } | 722 | } |
715 | mutex_unlock(&workqueue_mutex); | 723 | mutex_unlock(&workqueue_mutex); |
716 | } | 724 | } |
@@ -808,12 +816,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
808 | return NOTIFY_BAD; | 816 | return NOTIFY_BAD; |
809 | 817 | ||
810 | case CPU_ONLINE: | 818 | case CPU_ONLINE: |
811 | wake_up_process(cwq->thread); | 819 | start_workqueue_thread(cwq, cpu); |
812 | break; | 820 | break; |
813 | 821 | ||
814 | case CPU_UP_CANCELED: | 822 | case CPU_UP_CANCELED: |
815 | if (cwq->thread) | 823 | start_workqueue_thread(cwq, -1); |
816 | wake_up_process(cwq->thread); | ||
817 | case CPU_DEAD: | 824 | case CPU_DEAD: |
818 | cleanup_workqueue_thread(cwq, cpu); | 825 | cleanup_workqueue_thread(cwq, cpu); |
819 | break; | 826 | break; |