author	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-12 19:28:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-12 19:28:26 -0500
commit	1181a2449969c59f0ab6b95374fe6983cc07286d (patch)
tree	cdbd320e6d2e0854ef8c699eb711d22e03275522
parent	b743791639d8142277df1c2814c282e3ad752f06 (diff)
parent	9d0793370987b98708d2f75ee3bba7c1008d8512 (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sparc64: Fix cpumask related build failure
smp_call_function_single(): be slightly less stupid, fix
smp_call_function_single(): be slightly less stupid
rcu: fix bug in rcutorture system-shutdown code
-rw-r--r--	arch/sparc/include/asm/topology_64.h	4
-rw-r--r--	include/linux/smp.h	13
-rw-r--r--	kernel/Makefile	6
-rw-r--r--	kernel/rcutorture.c	113
-rw-r--r--	kernel/up.c	20
5 files changed, 100 insertions, 56 deletions
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index b8a65b64e1df..5bc0b8fd6374 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -47,6 +47,10 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 	(pcibus_to_node(bus) == -1 ? \
 	 CPU_MASK_ALL : \
 	 node_to_cpumask(pcibus_to_node(bus)))
+#define cpumask_of_pcibus(bus)	\
+	(pcibus_to_node(bus) == -1 ? \
+	 CPU_MASK_ALL_PTR : \
+	 cpumask_of_node(pcibus_to_node(bus)))
 
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.min_interval		= 8,			\
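Note: the new cpumask_of_pcibus() mirrors the existing pcibus_to_cpumask() but yields a cpumask pointer (CPU_MASK_ALL_PTR or cpumask_of_node()) for use with the pointer-based cpumask API. A hypothetical caller, for illustration only (print_bus_cpus() is not from this patch and assumes <linux/pci.h> and <linux/topology.h> are available):

/* Illustration only: iterate the CPUs local to a PCI bus. */
static void print_bus_cpus(struct pci_bus *bus)
{
	const struct cpumask *mask = cpumask_of_pcibus(bus);
	int cpu;

	for_each_cpu(cpu, mask)
		pr_debug("CPU %d is local to bus %s\n", cpu, bus->name);
}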
diff --git a/include/linux/smp.h b/include/linux/smp.h
index b82466968101..715196b09d67 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -24,6 +24,9 @@ struct call_single_data {
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
+int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
+				int wait);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -79,8 +82,6 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
 	return 0;
 }
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-			     int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
 
 /*
@@ -140,14 +141,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()		1
 #define smp_prepare_boot_cpu()		do {} while (0)
-#define smp_call_function_single(cpuid, func, info, wait) \
-({ \
-	WARN_ON(cpuid != 0);	\
-	local_irq_disable();	\
-	(func)(info);		\
-	local_irq_enable();	\
-	0;			\
-})
 #define smp_call_function_mask(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
 #define smp_call_function_many(mask, func, info, wait) \
diff --git a/kernel/Makefile b/kernel/Makefile
index 2921d90ce32f..2aebc4cd7878 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,7 +40,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
+obj-y += smp.o
+else
+obj-y += up.o
+endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 1cff28db56b6..7c4142a79f0a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,29 +136,47 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
-#define FULLSTOP_SHUTDOWN 1	/* Bail due to system shutdown/panic. */
-#define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
-static int fullstop;		/* stop generating callbacks at test end. */
-DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
-				/* spawning of kthreads. */
+/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
+
+#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
+#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
+static int fullstop = FULLSTOP_RMMOD;
+DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
+				/* of kthreads. */
 
 /*
- * Detect and respond to a signal-based shutdown.
+ * Detect and respond to a system shutdown.
  */
 static int
 rcutorture_shutdown_notify(struct notifier_block *unused1,
 			   unsigned long unused2, void *unused3)
 {
-	if (fullstop)
-		return NOTIFY_DONE;
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop)
+	if (fullstop == FULLSTOP_DONTSTOP)
 		fullstop = FULLSTOP_SHUTDOWN;
+	else
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 	mutex_unlock(&fullstop_mutex);
 	return NOTIFY_DONE;
 }
 
 /*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+static void rcutorture_shutdown_absorb(char *title)
+{
+	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+		printk(KERN_NOTICE
+		       "rcutorture thread %s parking due to system shutdown\n",
+		       title);
+		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+	}
+}
+
+/*
  * Allocate an element from the rcu_tortures pool.
  */
 static struct rcu_torture *
@@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp)
 }
 
 static void
-rcu_stutter_wait(void)
+rcu_stutter_wait(char *title)
 {
-	while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
+	while (stutter_pause_test || !rcutorture_runnable) {
 		if (rcutorture_runnable)
 			schedule_timeout_interruptible(1);
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
+		rcutorture_shutdown_absorb(title);
 	}
 }
 
@@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p)
 	int i;
 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
-	if (fullstop) {
+	if (fullstop != FULLSTOP_DONTSTOP) {
 		/* Test is ending, just drop callbacks on the floor. */
 		/* The next initialization will pick up the pieces. */
 		return;
@@ -619,10 +638,11 @@ rcu_torture_writer(void *arg)
 		}
 		rcu_torture_current_version++;
 		oldbatch = cur_ops->completed();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_writer");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_writer");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg)
 		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
 		udelay(rcu_random(&rand) & 0x3ff);
 		cur_ops->sync();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_fakewriter");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -752,12 +773,13 @@ rcu_torture_reader(void *arg)
 		preempt_enable();
 		cur_ops->readunlock(idx);
 		schedule();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_reader");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+	rcutorture_shutdown_absorb("rcu_torture_reader");
 	if (irqreader && cur_ops->irqcapable)
 		del_timer_sync(&t);
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -854,7 +876,8 @@ rcu_torture_stats(void *arg)
 	do {
 		schedule_timeout_interruptible(stat_interval * HZ);
 		rcu_torture_stats_print();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stats");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
 	return 0;
 }
@@ -866,52 +889,49 @@ static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_var_t tmp_mask;
+	cpumask_t tmp_mask;
 	int i;
 
-	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
-		BUG();
-
-	cpumask_setall(tmp_mask);
+	cpus_setall(tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
-	if (num_online_cpus() == 1)
-		goto out;
+	if (num_online_cpus() == 1) {
+		put_online_cpus();
+		return;
+	}
 
 	if (rcu_idle_cpu != -1)
-		cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
+		cpu_clear(rcu_idle_cpu, tmp_mask);
 
-	set_cpus_allowed_ptr(current, tmp_mask);
+	set_cpus_allowed_ptr(current, &tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
 				set_cpus_allowed_ptr(reader_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed_ptr(writer_task, tmp_mask);
+		set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed_ptr(stats_task, tmp_mask);
+		set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
 	else
 		rcu_idle_cpu--;
 
-out:
 	put_online_cpus();
-	free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg)
 	do {
 		schedule_timeout_interruptible(shuffle_interval * HZ);
 		rcu_torture_shuffle_tasks();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_shuffle");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
 	return 0;
 }
@@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg)
 	do {
 		schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 1;
-		if (!kthread_should_stop() && !fullstop)
+		if (!kthread_should_stop())
 			schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 0;
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stutter");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
 	return 0;
 }
@@ -970,15 +992,16 @@ rcu_torture_cleanup(void)
 	int i;
 
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop) {
-		/* If being signaled, let it happen, then exit. */
+	if (fullstop == FULLSTOP_SHUTDOWN) {
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 		mutex_unlock(&fullstop_mutex);
-		schedule_timeout_interruptible(10 * HZ);
+		schedule_timeout_uninterruptible(10);
 		if (cur_ops->cb_barrier != NULL)
 			cur_ops->cb_barrier();
 		return;
 	}
-	fullstop = FULLSTOP_CLEANUP;
+	fullstop = FULLSTOP_RMMOD;
 	mutex_unlock(&fullstop_mutex);
 	unregister_reboot_notifier(&rcutorture_nb);
 	if (stutter_task) {
@@ -1078,7 +1101,7 @@ rcu_torture_init(void)
 	else
 		nrealreaders = 2 * num_online_cpus();
 	rcu_torture_print_module_parms("Start of test");
-	fullstop = 0;
+	fullstop = FULLSTOP_DONTSTOP;
 
 	/* Set up the freelist. */
 
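Note: the rcutorture changes above amount to a small state machine. fullstop starts as FULLSTOP_RMMOD, rcu_torture_init() sets it to FULLSTOP_DONTSTOP while the test runs, and the reboot notifier flips it to FULLSTOP_SHUTDOWN, after which every torture kthread parks itself in rcutorture_shutdown_absorb() so it can never touch module text or data again. A minimal sketch of the resulting per-thread loop, assuming the rcutorture.c context above (do_one_torture_pass() is a hypothetical placeholder, not from the patch):

/* Sketch only: the shape of an rcutorture kthread after this patch. */
static int example_torture_thread(void *arg)
{
	do {
		do_one_torture_pass();			/* hypothetical work item */
		rcu_stutter_wait("example_torture_thread"); /* also absorbs shutdown */
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* If the system is going down, park forever instead of returning. */
	rcutorture_shutdown_absorb("example_torture_thread");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}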
diff --git a/kernel/up.c b/kernel/up.c
new file mode 100644
index 000000000000..c04b9dcfcebe
--- /dev/null
+++ b/kernel/up.c
@@ -0,0 +1,20 @@
+/*
+ * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+				int wait)
+{
+	WARN_ON(cpu != 0);
+
+	local_irq_disable();
+	(func)(info);
+	local_irq_enable();
+
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
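Note: with the declaration hoisted into the common part of <linux/smp.h>, callers look the same on SMP and UP kernels; UP builds now link this kernel/up.c function instead of expanding the old header macro. A hypothetical caller, for illustration only (report_cpu() and example_call() are not from this patch; assumes <linux/kernel.h> and <linux/smp.h>):

/* Illustration only: run a function on CPU 0 and wait for it to finish. */
static void report_cpu(void *info)
{
	pr_info("smp_call_function_single ran on CPU %d\n", smp_processor_id());
}

static void example_call(void)
{
	/* On UP this resolves to the kernel/up.c implementation above. */
	smp_call_function_single(0, report_cpu, NULL, 1);
}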