 kernel/sched.c      | 2 ++
 kernel/softirq.c    | 2 ++
 kernel/softlockup.c | 2 ++
 kernel/workqueue.c  | 2 ++
 4 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f8d540b324ca..f06d059edef5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!cpu_rq(cpu)->migration_thread)
+			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d64e2e..9e2f1c6e73d7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(ksoftirqd, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 2c1be1163edc..b5c3b94e01ce 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(watchdog_task, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f869aff6bc0c..565cf7a1febd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -590,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));
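All four hunks apply the same guard: the CPU_UP_CANCELED path now skips the kthread_bind() call when the per-cpu thread pointer is not set, which the checks suggest can happen when bring-up is cancelled before the thread was ever created. The userspace sketch below is illustrative only, not kernel code: struct cpu_state, up_cancelled() and worker_fn() are made-up names used to show the same check-before-teardown pattern with pthreads.

/* Minimal userspace analogue of the NULL checks added above.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cpu_state {
	pthread_t *worker;	/* NULL if bring-up failed before the thread was created */
};

static void *worker_fn(void *arg)
{
	(void)arg;
	return NULL;
}

/* Analogue of the CPU_UP_CANCELED cases: tolerate a worker that
 * never came into existence instead of dereferencing NULL. */
static void up_cancelled(struct cpu_state *st)
{
	if (!st->worker) {
		puts("no worker to tear down, skipping");
		return;
	}
	pthread_join(*st->worker, NULL);
	free(st->worker);
	st->worker = NULL;
	puts("worker torn down");
}

int main(void)
{
	struct cpu_state cancelled_early = { .worker = NULL };
	struct cpu_state started = { .worker = malloc(sizeof(pthread_t)) };

	if (!started.worker ||
	    pthread_create(started.worker, NULL, worker_fn, NULL) != 0) {
		free(started.worker);
		started.worker = NULL;	/* treat creation failure like the early-cancel case */
	}

	up_cancelled(&cancelled_early);	/* safe only because of the NULL check */
	up_cancelled(&started);
	return 0;
}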