Diffstat (limited to 'kernel/softlockup.c')
-rw-r--r--  kernel/softlockup.c  |  6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 75976209cea7..c67189a25d52 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -73,9 +73,6 @@ void softlockup_tick(struct pt_regs *regs)
 static int watchdog(void * __bind_cpu)
 {
 	struct sched_param param = { .sched_priority = 99 };
-	int this_cpu = (long) __bind_cpu;
-
-	printk("softlockup thread %d started up.\n", this_cpu);
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->flags |= PF_NOFREEZE;
@@ -123,7 +120,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 		/* Unbind so it can run. Fall thru. */
-		kthread_bind(per_cpu(watchdog_task, hotcpu), smp_processor_id());
+		kthread_bind(per_cpu(watchdog_task, hotcpu),
+			     any_online_cpu(cpu_online_map));
 	case CPU_DEAD:
 		p = per_cpu(watchdog_task, hotcpu);
 		per_cpu(watchdog_task, hotcpu) = NULL;
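Note on the second hunk (not part of the patch itself): when CPU bring-up is cancelled, the per-CPU watchdog thread was created bound to a CPU that never came online, so it must be rebound to a live CPU before it can run and be stopped. The old code picked smp_processor_id(), which in a preemptible notifier path is not a stable choice and can trigger DEBUG_PREEMPT warnings; the new code asks for any online CPU explicitly. A minimal sketch of the surrounding cpu_callback() teardown, reconstructed from softlockup.c of this era (the kthread_stop() call is assumed from context and is not shown in the hunk above):

	case CPU_UP_CANCELED:
		/* The CPU never came up: rebind the watchdog thread to
		 * some online CPU so the scheduler can actually run it.
		 * Fall through to the common teardown below. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		/* Detach the per-CPU pointer and stop the thread. */
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;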