Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c          34
-rw-r--r--  kernel/hrtimer.c       2
-rw-r--r--  kernel/profile.c       4
-rw-r--r--  kernel/rcupdate.c      2
-rw-r--r--  kernel/relay.c         2
-rw-r--r--  kernel/sched.c        10
-rw-r--r--  kernel/softirq.c       4
-rw-r--r--  kernel/softlockup.c    4
-rw-r--r--  kernel/timer.c         2
-rw-r--r--  kernel/workqueue.c     2
10 files changed, 50 insertions(+), 16 deletions(-)
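
Every change below follows one convention: when a CPU is taken down or brought up on behalf of suspend/resume (i.e. with user-space tasks frozen), the hotplug notifier event carries an extra CPU_TASKS_FROZEN modifier bit OR-ed into the usual action code. A minimal sketch of how the *_FROZEN event names used throughout this diff are presumably composed; the real definitions live in the companion header change, which falls outside this kernel/-only diffstat, and the 0x0010 value is an assumption:

/* Sketch, not part of this diff: each frozen variant is the base
 * event with the CPU_TASKS_FROZEN modifier bit set. */
#define CPU_TASKS_FROZEN        0x0010  /* assumed value */

#define CPU_ONLINE_FROZEN       (CPU_ONLINE       | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE   | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN  (CPU_UP_CANCELED  | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED  | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN         (CPU_DEAD         | CPU_TASKS_FROZEN)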
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 28cb6c71a47a..369d2892687d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -120,12 +120,13 @@ static int take_cpu_down(void *unused)
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int _cpu_down(unsigned int cpu)
+static int _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
 	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
+	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -134,11 +135,11 @@ static int _cpu_down(unsigned int cpu)
 		return -EINVAL;
 
 	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
-	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
-		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu,
-					  nr_calls, NULL);
+		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+					  hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
 				__FUNCTION__, cpu);
 		err = -EINVAL;
@@ -157,7 +158,7 @@ static int _cpu_down(unsigned int cpu)
 
 	if (IS_ERR(p) || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone. Can't complain. */
-		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 
@@ -176,7 +177,8 @@ static int _cpu_down(unsigned int cpu)
 	__cpu_die(cpu);
 
 	/* CPU is completely dead: tell everyone. Too late to complain. */
-	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD)
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
+				    hcpu) == NOTIFY_BAD)
 		BUG();
 
 	check_for_tasks(cpu);
@@ -186,8 +188,7 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out_release:
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE,
-						(void *)(long)cpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	return err;
 }
 
@@ -199,7 +200,7 @@ int cpu_down(unsigned int cpu)
 	if (cpu_hotplug_disabled)
 		err = -EBUSY;
 	else
-		err = _cpu_down(cpu);
+		err = _cpu_down(cpu, 0);
 
 	mutex_unlock(&cpu_add_remove_lock);
 	return err;
@@ -207,16 +208,17 @@ int cpu_down(unsigned int cpu)
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /* Requires cpu_add_remove_lock to be held */
-static int __cpuinit _cpu_up(unsigned int cpu)
+static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 {
 	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
+	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
 	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
-	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu,
+	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
 			-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -234,12 +236,12 @@ static int __cpuinit _cpu_up(unsigned int cpu)
 	BUG_ON(!cpu_online(cpu));
 
 	/* Now call notifier in preparation. */
-	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
 out_notify:
 	if (ret != 0)
 		__raw_notifier_call_chain(&cpu_chain,
-				CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
 	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 
 	return ret;
@@ -253,7 +255,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	if (cpu_hotplug_disabled)
 		err = -EBUSY;
 	else
-		err = _cpu_up(cpu);
+		err = _cpu_up(cpu, 0);
 
 	mutex_unlock(&cpu_add_remove_lock);
 	return err;
@@ -283,7 +285,7 @@ int disable_nonboot_cpus(void)
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
 			continue;
-		error = _cpu_down(cpu);
+		error = _cpu_down(cpu, 1);
 		if (!error) {
 			cpu_set(cpu, frozen_cpus);
 			printk("CPU%d is down\n", cpu);
@@ -318,7 +320,7 @@ void enable_nonboot_cpus(void)
 	suspend_cpu_hotplug = 1;
 	printk("Enabling non-boot CPUs ...\n");
 	for_each_cpu_mask(cpu, frozen_cpus) {
-		error = _cpu_up(cpu);
+		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
 			continue;
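
With _cpu_down() and _cpu_up() now ORing mod into every event they emit, each notifier decides for itself whether suspend-time hotplug needs different treatment. The per-subsystem changes that follow all take the simplest option: handle the frozen variant identically to the normal one by adding a second case label. A hedged sketch of that pattern (my_cpu_callback and my_init_cpu are hypothetical, not from this patch):

/* Sketch: treat suspend-time (frozen) hotplug like normal hotplug
 * by listing both event variants on the same case. */
static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:	/* same per-cpu setup either way */
		my_init_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}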
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c9f4f044a8a8..23c03f43e196 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1411,11 +1411,13 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 	switch (action) {
 
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		init_hrtimers_cpu(cpu);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
 		migrate_hrtimers(cpu);
 		break;
diff --git a/kernel/profile.c b/kernel/profile.c
index 9bfadb248dd8..cc91b9bf759d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -340,6 +340,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		node = cpu_to_node(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
@@ -365,10 +366,13 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
 			__free_page(page);
 		return NOTIFY_BAD;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		cpu_set(cpu, prof_cpu_mask);
 		break;
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		cpu_clear(cpu, prof_cpu_mask);
 		if (per_cpu(cpu_profile_hits, cpu)[0]) {
 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 3554b76da84c..2c2dd8410dc4 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -558,9 +558,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	long cpu = (long)hcpu;
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		rcu_offline_cpu(cpu);
 		break;
 	default:
diff --git a/kernel/relay.c b/kernel/relay.c
index e804589c863c..61a504900eaa 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -484,6 +484,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
 
 	switch(action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		mutex_lock(&relay_channels_mutex);
 		list_for_each_entry(chan, &relay_channels, list) {
 			if (chan->buf[hotcpu])
@@ -500,6 +501,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
 		mutex_unlock(&relay_channels_mutex);
 		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/* No need to flush the cpu : will be flushed upon
 		 * final relay_flush() call. */
 		break;
diff --git a/kernel/sched.c b/kernel/sched.c
index fe1a9c2b855a..799d23b4e35d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5394,6 +5394,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
 		if (IS_ERR(p))
 			return NOTIFY_BAD;
@@ -5407,12 +5408,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		/* Strictly unneccessary, as first user will wake it. */
 		wake_up_process(cpu_rq(cpu)->migration_thread);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		if (!cpu_rq(cpu)->migration_thread)
 			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
@@ -5423,6 +5426,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
@@ -6912,14 +6916,20 @@ static int update_sched_domains(struct notifier_block *nfb,
 {
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/*
 		 * Fall through and re-initialise the domains.
 		 */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8b75008e2bd8..0b9886a00e74 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -593,6 +593,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk("ksoftirqd for %i failed\n", hotcpu);
@@ -602,16 +603,19 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		per_cpu(ksoftirqd, hotcpu) = p;
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		wake_up_process(per_cpu(ksoftirqd, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		if (!per_cpu(ksoftirqd, hotcpu))
 			break;
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		p = per_cpu(ksoftirqd, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = NULL;
 		kthread_stop(p);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 8fa7040247ad..0131e296ffb4 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -146,6 +146,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		BUG_ON(per_cpu(watchdog_task, hotcpu));
 		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
 		if (IS_ERR(p)) {
@@ -157,16 +158,19 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_bind(p, hotcpu);
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		wake_up_process(per_cpu(watchdog_task, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		if (!per_cpu(watchdog_task, hotcpu))
 			break;
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		p = per_cpu(watchdog_task, hotcpu);
 		per_cpu(watchdog_task, hotcpu) = NULL;
 		kthread_stop(p);
diff --git a/kernel/timer.c b/kernel/timer.c
index 58f6dd07c80b..de85f8491c1d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1293,11 +1293,13 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
 	long cpu = (long)hcpu;
 	switch(action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		if (init_timers_cpu(cpu) < 0)
 			return NOTIFY_BAD;
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		migrate_timers(cpu);
 		break;
 #endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b976ed87dd37..fb56fedd5c02 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -799,6 +799,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
 
+	action &= ~CPU_TASKS_FROZEN;
+
 	switch (action) {
 	case CPU_LOCK_ACQUIRE:
 		mutex_lock(&workqueue_mutex);
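
The workqueue callback uses the other idiom: rather than doubling every case label, it clears the modifier bit once up front, so CPU_UP_PREPARE_FROZEN falls into the existing CPU_UP_PREPARE case, and so on for every event. This is only appropriate when, as here, the notifier never needs to distinguish suspend-time hotplug from normal hotplug. A minimal sketch of the same idiom in a hypothetical callback:

/* Sketch: strip the modifier once, then switch on base events only. */
static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	action &= ~CPU_TASKS_FROZEN;	/* maps *_FROZEN back to base event */

	switch (action) {
	case CPU_UP_PREPARE:	/* also reached for CPU_UP_PREPARE_FROZEN */
		/* ... per-cpu setup ... */
		break;
	}
	return NOTIFY_OK;
}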