-rw-r--r--	include/linux/sched.h	 9
-rw-r--r--	include/linux/smp.h	 5
-rw-r--r--	init/main.c	23
-rw-r--r--	kernel/sched.c	 5
-rw-r--r--	kernel/smp.c	 4
-rw-r--r--	kernel/softirq.c	 3
-rw-r--r--	kernel/softlockup.c	25
7 files changed, 32 insertions, 42 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3260a5c42b91..adb8077dc463 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -292,7 +292,6 @@ extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
-extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int softlockup_panic;
@@ -2222,14 +2221,6 @@ static inline void inc_syscw(struct task_struct *tsk)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void migration_init(void);
-#else
-static inline void migration_init(void)
-{
-}
-#endif
-
 #ifndef TASK_SIZE_OF
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 48262f86c969..66484d4a8459 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data);
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
-void init_call_single_data(void);
 void ipi_call_lock(void);
 void ipi_call_unlock(void);
 void ipi_call_lock_irq(void);
 void ipi_call_unlock_irq(void);
-#else
-static inline void init_call_single_data(void)
-{
-}
 #endif
 
 /*
diff --git a/init/main.c b/init/main.c
index b6fec08dbbef..20fdc9884b77 100644
--- a/init/main.c
+++ b/init/main.c
@@ -774,16 +774,7 @@ static void __init do_basic_setup(void)
 	do_initcalls();
 }
 
-static int __initdata nosoftlockup;
-
-static int __init nosoftlockup_setup(char *str)
-{
-	nosoftlockup = 1;
-	return 1;
-}
-__setup("nosoftlockup", nosoftlockup_setup);
-
-static void __init __do_pre_smp_initcalls(void)
+static void __init do_pre_smp_initcalls(void)
 {
 	initcall_t *call;
 
@@ -791,17 +782,6 @@ static void __init __do_pre_smp_initcalls(void)
 		do_one_initcall(*call);
 }
 
-static void __init do_pre_smp_initcalls(void)
-{
-	extern int spawn_ksoftirqd(void);
-
-	init_call_single_data();
-	migration_init();
-	spawn_ksoftirqd();
-	if (!nosoftlockup)
-		spawn_softlockup_task();
-}
-
 static void run_init_process(char *init_filename)
 {
 	argv_init[0] = init_filename;
@@ -873,7 +853,6 @@ static int __init kernel_init(void * unused)
 
 	smp_prepare_cpus(setup_max_cpus);
 
-	__do_pre_smp_initcalls();
 	do_pre_smp_initcalls();
 
 	smp_init();
diff --git a/kernel/sched.c b/kernel/sched.c
index 0047bd9b96aa..fde1a1026359 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6389,7 +6389,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
 	.priority = 10
 };
 
-void __init migration_init(void)
+static int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
@@ -6399,7 +6399,10 @@ void __init migration_init(void)
 	BUG_ON(err == NOTIFY_BAD);
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
+
+	return err;
 }
+early_initcall(migration_init);
 #endif
 
 #ifdef CONFIG_SMP
diff --git a/kernel/smp.c b/kernel/smp.c
index 462c785ca1ee..96fc7c0edc59 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,7 +33,7 @@ struct call_single_queue {
 	spinlock_t lock;
 };
 
-void __cpuinit init_call_single_data(void)
+static int __cpuinit init_call_single_data(void)
 {
 	int i;
 
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
 		spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
+	return 0;
 }
+early_initcall(init_call_single_data);
 
 static void csd_flag_wait(struct call_single_data *data)
 {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f6b03d56c2bf..c506f266a6b9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
-__init int spawn_ksoftirqd(void)
+static __init int spawn_ksoftirqd(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void)
 	register_cpu_notifier(&cpu_nfb);
 	return 0;
 }
+early_initcall(spawn_ksoftirqd);
 
 #ifdef CONFIG_SMP
 /*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 7bd8d1aadd5d..b75b492fbfcf 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -338,14 +338,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
-__init void spawn_softlockup_task(void)
+static int __initdata nosoftlockup;
+
+static int __init nosoftlockup_setup(char *str)
+{
+	nosoftlockup = 1;
+	return 1;
+}
+__setup("nosoftlockup", nosoftlockup_setup);
+
+static int __init spawn_softlockup_task(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
-	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+	int err;
 
-	BUG_ON(err == NOTIFY_BAD);
+	if (nosoftlockup)
+		return 0;
+
+	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+	if (err == NOTIFY_BAD) {
+		BUG();
+		return 1;
+	}
 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
 
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+	return 0;
 }
+early_initcall(spawn_softlockup_task);
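
For context: each function touched above is converted to the standard early_initcall() registration pattern, so init/main.c no longer has to call it by hand from do_pre_smp_initcalls(). A minimal sketch of that pattern follows; the function name and body are illustrative only, not part of this patch:

	#include <linux/init.h>

	/* Hypothetical pre-SMP setup hook: runs once on the boot CPU
	 * during the early initcall pass, before smp_init(). */
	static int __init example_early_setup(void)
	{
		/* ... one-time boot-CPU setup goes here ... */
		return 0;	/* initcalls return 0 on success */
	}
	early_initcall(example_early_setup);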