diff options
author     Oleg Nesterov <oleg@tv-sign.ru>                      2008-07-25 04:47:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2008-07-25 13:53:41 -0400
commit     8448502cfc915f70e3f8923849ade27d472044cb (patch)
tree       964522eb36ea5424ece910092da5dce773c1f6cc /kernel/workqueue.c
parent     8de6d308bab4f67fcf953562f9f08f9527cad72d (diff)
workqueues: do CPU_UP_CANCELED if CPU_UP_PREPARE fails
The bug was pointed out by Akinobu Mita <akinobu.mita@gmail.com>, and this
patch is based on his original patch.
workqueue_cpu_callback(CPU_UP_PREPARE) expects that if it returns
NOTIFY_BAD, _cpu_up() will send CPU_UP_CANCELED then.
However, this is not true since
"cpu hotplug: cpu: deliver CPU_UP_CANCELED only to NOTIFY_OKed callbacks with CPU_UP_PREPARE"
commit: a0d8cdb652d35af9319a9e0fb7134de2a276c636
The callback which has returned NOTIFY_BAD will not receive
CPU_UP_CANCELED. Change the code to fulfil the CPU_UP_CANCELED logic if
CPU_UP_PREPARE fails.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Reported-by: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 9 +++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7cf430372f89..ec7e4f62aaff 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -911,6 +911,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
+	int ret = NOTIFY_OK;
 
 	action &= ~CPU_TASKS_FROZEN;
 
@@ -918,7 +919,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
-
+undo:
 	list_for_each_entry(wq, &workqueues, list) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 
@@ -928,7 +929,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 				break;
 			printk(KERN_ERR "workqueue [%s] for %i failed\n",
 				wq->name, cpu);
-			return NOTIFY_BAD;
+			action = CPU_UP_CANCELED;
+			ret = NOTIFY_BAD;
+			goto undo;
 
 		case CPU_ONLINE:
 			start_workqueue_thread(cwq, cpu);
@@ -948,7 +951,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		cpu_clear(cpu, cpu_populated_map);
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 void __init init_workqueues(void)