author    Thomas Gleixner <tglx@linutronix.de>    2007-10-26 11:22:17 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2007-10-27 14:57:43 -0400
commit    ceff8d859c77981147c320da4074dcf8a06501a4 (patch)
tree      c1a4913e646c9d2ea2410e96184b4b00e104dd74
parent    ef49c32b8489a845a54ca4689b17bfbf8db9bf9e (diff)
Revert "i386: export i386 smp_call_function_mask() to modules"
This reverts commit 6442eea937ef797d4b66733f49c82e2fdc2aca6f.

The patch breaks smp_ops and needs to be reverted. The solution to
allow modular build of KVM is to export smp_ops instead.

Pointed-out-by: James Bottomley
 <jejb> tglx, so write out 100 times "voyager is a useful architecture" ...
 <tglx> yes, Sir

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
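For context, the fix described above (exporting the ops structure itself so the header's inline wrapper can resolve inside modules such as KVM) would look roughly like the following in arch/x86/kernel/smp_32.c. This is only a sketch of the stated plan, not part of this revert:

	/* Sketch, not part of this commit: export the ops struct so that the
	 * static inline smp_call_function_mask() wrapper in smp_32.h links
	 * from modular code such as KVM. */
	#include <linux/module.h>

	EXPORT_SYMBOL(smp_ops);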
-rw-r--r--	arch/x86/kernel/smp_32.c	7
-rw-r--r--	include/asm-x86/smp_32.h	9
2 files changed, 6 insertions, 10 deletions
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index f32115308399..22a6fa0cfb2a 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -708,10 +708,3 @@ struct smp_ops smp_ops = {
 	.smp_send_reschedule = native_smp_send_reschedule,
 	.smp_call_function_mask = native_smp_call_function_mask,
 };
-
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-			   void *info, int wait)
-{
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function_mask);
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 7056d8684522..e10b7affdfe5 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -94,9 +94,12 @@ static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
-extern int smp_call_function_mask(cpumask_t mask,
-				  void (*func) (void *info), void *info,
-				  int wait);
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
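With smp_call_function_mask() restored to a static inline, a modular caller compiles straight into an indirect call through smp_ops, which is why the struct itself (rather than a wrapper function) has to be exported. A hypothetical module-side caller, for illustration only (remote_fn and run_on_cpu are made-up names):

	#include <linux/module.h>
	#include <linux/smp.h>

	/* Hypothetical callback: runs on each CPU selected by the mask. */
	static void remote_fn(void *info)
	{
		/* per-CPU work would go here */
	}

	/* Run remote_fn on one remote CPU and wait for it to finish.
	 * The inline wrapper expands to smp_ops.smp_call_function_mask(...),
	 * so the module only needs the exported smp_ops symbol. */
	static int run_on_cpu(int cpu)
	{
		return smp_call_function_mask(cpumask_of_cpu(cpu), remote_fn, NULL, 1);
	}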