author    Ingo Molnar <mingo@elte.hu>    2008-07-15 15:55:59 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-07-15 15:55:59 -0400
commit    1a781a777b2f6ac46523fe92396215762ced624d
tree      4f34bb4aade85c0eb364b53d664ec7f6ab959006 /include
parent    b9d2252c1e44fa83a4e65fdc9eb93db6297c55af
parent    42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:
	arch/powerpc/Kconfig
	arch/s390/kernel/time.c
	arch/x86/kernel/apic_32.c
	arch/x86/kernel/cpu/perfctr-watchdog.c
	arch/x86/kernel/i8259_64.c
	arch/x86/kernel/ldt.c
	arch/x86/kernel/nmi_64.c
	arch/x86/kernel/smpboot.c
	arch/x86/xen/smp.c
	include/asm-x86/hw_irq_32.h
	include/asm-x86/hw_irq_64.h
	include/asm-x86/mach-default/irq_vectors.h
	include/asm-x86/mach-voyager/irq_vectors.h
	include/asm-x86/smp.h
	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/smp.h                        5
-rw-r--r--  include/asm-arm/smp.h                          3
-rw-r--r--  include/asm-ia64/smp.h                         8
-rw-r--r--  include/asm-m32r/smp.h                         4
-rw-r--r--  include/asm-mips/smp.h                        13
-rw-r--r--  include/asm-parisc/smp.h                       3
-rw-r--r--  include/asm-powerpc/smp.h                      8
-rw-r--r--  include/asm-sh/smp.h                          14
-rw-r--r--  include/asm-sparc/smp.h                        2
-rw-r--r--  include/asm-x86/hw_irq.h                       1
-rw-r--r--  include/asm-x86/irq_vectors.h                  6
-rw-r--r--  include/asm-x86/mach-default/entry_arch.h      1
-rw-r--r--  include/asm-x86/mach-visws/entry_arch.h       22
-rw-r--r--  include/asm-x86/mach-voyager/entry_arch.h      2
-rw-r--r--  include/asm-x86/smp.h                         21
-rw-r--r--  include/asm-x86/xen/events.h                   1
-rw-r--r--  include/linux/smp.h                           46
17 files changed, 89 insertions, 71 deletions
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 286e1d844f63..544c69af8168 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -47,12 +47,13 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern int smp_num_cpus;
 #define cpu_possible_map cpu_present_map
 
-int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
 
 #else /* CONFIG_SMP */
 
 #define hard_smp_processor_id() 0
-#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
+#define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; })
 
 #endif /* CONFIG_SMP */
 
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index af99636db400..7fffa2404b8e 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -101,6 +101,9 @@ extern void platform_cpu_die(unsigned int cpu);
 extern int platform_cpu_kill(unsigned int cpu);
 extern void platform_cpu_enable(unsigned int cpu);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 /*
  * Local timer interrupt handling function (can be IPI'ed).
  */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index ec5f355fb7e3..27731e032ee9 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,9 +38,6 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-				  void *info, int wait);
-
 #define hard_smp_processor_id() ia64_get_lid()
 
 #ifdef CONFIG_SMP
@@ -124,11 +121,12 @@ extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
 extern void smp_send_reschedule (int cpu);
-extern void lock_ipi_calllock(void);
-extern void unlock_ipi_calllock(void);
 extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #else /* CONFIG_SMP */
 
 #define cpu_logical_id(i) 0
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 078e1a51a042..c5dd66916692 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -89,6 +89,9 @@ static __inline__ unsigned int num_booting_cpus(void)
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* not __ASSEMBLY__ */
 
 #define NO_PROC_ID (0xff)	/* No processor magic marker */
@@ -104,6 +107,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 #define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
 #define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
 #define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+#define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0)
 
 #define IPI_SHIFT (0)
 #define NR_IPIS (8)
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 84fef1aeec0c..0ff5b523ea77 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -35,16 +35,6 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID (-1)
 
-struct call_data_struct {
-	void (*func)(void *);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-extern struct call_data_struct *call_data;
-
 #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
 #define SMP_CALL_FUNCTION 0x2
 
@@ -67,4 +57,7 @@ static inline void smp_send_reschedule(int cpu)
 
 extern asmlinkage void smp_call_function_interrupt(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* __ASM_SMP_H */
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 306f4950e32e..398cdbaf4e54 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -30,6 +30,9 @@ extern cpumask_t cpu_online_map;
 extern void smp_send_reschedule(int cpu);
 extern void smp_send_all_nop(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* !ASSEMBLY */
 
 /*
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 505f35bacaa9..c663a1fa77c5 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
  * in /proc/interrupts will be wrong!!! --Troy */
 #define PPC_MSG_CALL_FUNCTION 0
 #define PPC_MSG_RESCHEDULE 1
-/* This is unused now */
-#if 0
-#define PPC_MSG_MIGRATE_TASK 2
-#endif
+#define PPC_MSG_CALL_FUNC_SINGLE 2
 #define PPC_MSG_DEBUGGER_BREAK 3
 
 void smp_init_iSeries(void);
@@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void);
 
 extern struct smp_ops_t *smp_ops;
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h
index 9c8d34b07ebf..593343cd26ee 100644
--- a/include/asm-sh/smp.h
+++ b/include/asm-sh/smp.h
@@ -26,18 +26,10 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID (-1)
 
-struct smp_fn_call_struct {
-	spinlock_t lock;
-	atomic_t finished;
-	void (*fn)(void *);
-	void *data;
-};
-
-extern struct smp_fn_call_struct smp_fn_call;
-
 #define SMP_MSG_FUNCTION 0
 #define SMP_MSG_RESCHEDULE 1
-#define SMP_MSG_NR 2
+#define SMP_MSG_FUNCTION_SINGLE 2
+#define SMP_MSG_NR 3
 
 void plat_smp_setup(void);
 void plat_prepare_cpus(unsigned int max_cpus);
@@ -46,6 +38,8 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
 void plat_send_ipi(unsigned int cpu, unsigned int message);
 int plat_register_ipi_handler(unsigned int message,
 			      void (*handler)(void *), void *arg);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
 
 #else
 
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index e6d561599726..b61e74bea06a 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		  unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	xc1((smpfunc_t)func, (unsigned long)info);
 	return 0;
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 18f067c310f7..77ba51df5668 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -48,6 +48,7 @@ extern void irq_move_cleanup_interrupt(void);
 extern void threshold_interrupt(void);
 
 extern void call_function_interrupt(void);
+extern void call_function_single_interrupt(void);
 
 /* PIC specific functions */
 extern void disable_8259A_irq(unsigned int irq);
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 0ac864ef3cd4..90b1d1f12f08 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -64,6 +64,7 @@
 # define INVALIDATE_TLB_VECTOR 0xfd
 # define RESCHEDULE_VECTOR 0xfc
 # define CALL_FUNCTION_VECTOR 0xfb
+# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
 # define THERMAL_APIC_VECTOR 0xf0
 
 #else
@@ -72,6 +73,7 @@
 #define ERROR_APIC_VECTOR 0xfe
 #define RESCHEDULE_VECTOR 0xfd
 #define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
 #define THERMAL_APIC_VECTOR 0xfa
 #define THRESHOLD_APIC_VECTOR 0xf9
 #define INVALIDATE_TLB_VECTOR_END 0xf7
@@ -143,6 +145,7 @@
 #define VIC_RESCHEDULE_CPI 4
 #define VIC_ENABLE_IRQ_CPI 5
 #define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
 
 /* Now the QIC CPIs: Since we don't need the two initial levels,
  * these are 2 less than the VIC CPIs */
@@ -152,9 +155,10 @@
 #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
 #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
 #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
 
 #define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
 
 /* this is the SYS_INT CPI. */
 #define VIC_SYS_INT 8
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc861469bdba..9283b60a1dd2 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif
 
 /*
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
index b183fa6d83d9..86be554342d4 100644
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ b/include/asm-x86/mach-visws/entry_arch.h
@@ -1,23 +1,5 @@
 /*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
+ * VISWS uses the standard Linux entry points:
  */
-#ifdef CONFIG_X86_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-#endif
 
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#endif
+#include "../mach-default/entry_arch.h"
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8c10b6..ae52624b5937 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
 BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
 BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
 BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 2e221f1ce0b2..c2784b3e0b77 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -50,9 +50,9 @@ struct smp_ops {
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
-	int (*smp_call_function_mask)(cpumask_t mask,
-				      void (*func)(void *info), void *info,
-				      int wait);
+
+	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_single_ipi)(int cpu);
 };
 
 /* Globals due to paravirt */
@@ -94,17 +94,22 @@ static inline void smp_send_reschedule(int cpu)
 	smp_ops.smp_send_reschedule(cpu);
 }
 
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -197,7 +202,5 @@ static inline int hard_smp_processor_id(void)
 extern void cpu_uninit(void);
 #endif
 
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 596312a7bfc9..f8d57ea1f05f 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -4,6 +4,7 @@
 enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
+	XEN_CALL_FUNCTION_SINGLE_VECTOR,
 
 	XEN_NR_IPIS,
 };
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232ccf9cfd..48262f86c969 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,18 @@
  */
 
 #include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
 
 extern void cpu_idle(void);
 
+struct call_single_data {
+	struct list_head list;
+	void (*func) (void *info);
+	void *info;
+	unsigned int flags;
+};
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+				int wait);
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int retry, int wait);
+				int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void ipi_call_lock(void);
+void ipi_call_unlock(void);
+void ipi_call_lock_irq(void);
+void ipi_call_unlock_irq(void);
+#else
+static inline void init_call_single_data(void)
+{
+}
+#endif
 
 /*
  * Call a function on all processors
  */
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
 
 #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
 #define MSG_ALL 0x8001
@@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 {
 	return 0;
 }
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,retry,wait) \
+#define on_each_cpu(func,info,wait) \
 	({ \
 		local_irq_disable(); \
 		func(info); \
@@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus() 1
 #define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
 ({ \
 	WARN_ON(cpuid != 0); \
 	local_irq_disable(); \
@@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { }
 })
 #define smp_call_function_mask(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
-
+static inline void init_call_single_data(void)
+{
+}
 #endif /* !SMP */
 
 /*