Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/smp.h                    |  5
-rw-r--r--  include/asm-arm/smp.h                      |  3
-rw-r--r--  include/asm-ia64/smp.h                     |  8
-rw-r--r--  include/asm-m32r/smp.h                     |  4
-rw-r--r--  include/asm-mips/smp.h                     | 13
-rw-r--r--  include/asm-parisc/smp.h                   |  3
-rw-r--r--  include/asm-powerpc/smp.h                  |  8
-rw-r--r--  include/asm-sh/smp.h                       | 14
-rw-r--r--  include/asm-sparc/smp.h                    |  2
-rw-r--r--  include/asm-x86/hw_irq.h                   |  1
-rw-r--r--  include/asm-x86/irq_vectors.h              |  6
-rw-r--r--  include/asm-x86/mach-default/entry_arch.h  |  1
-rw-r--r--  include/asm-x86/mach-visws/entry_arch.h    | 22
-rw-r--r--  include/asm-x86/mach-voyager/entry_arch.h  |  2
-rw-r--r--  include/asm-x86/smp.h                      | 21
-rw-r--r--  include/asm-x86/xen/events.h               |  1
-rw-r--r--  include/linux/smp.h                        | 46
17 files changed, 89 insertions(+), 71 deletions(-)
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 286e1d844f63..544c69af8168 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -47,12 +47,13 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern int smp_num_cpus;
 #define cpu_possible_map cpu_present_map
 
-int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
 
 #else /* CONFIG_SMP */
 
 #define hard_smp_processor_id() 0
-#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
+#define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; })
 
 #endif /* CONFIG_SMP */
 
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index af99636db400..7fffa2404b8e 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -101,6 +101,9 @@ extern void platform_cpu_die(unsigned int cpu);
 extern int platform_cpu_kill(unsigned int cpu);
 extern void platform_cpu_enable(unsigned int cpu);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 /*
  * Local timer interrupt handling function (can be IPI'ed).
  */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index ec5f355fb7e3..27731e032ee9 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,9 +38,6 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-				  void *info, int wait);
-
 #define hard_smp_processor_id() ia64_get_lid()
 
 #ifdef CONFIG_SMP
@@ -124,11 +121,12 @@ extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
 extern void smp_send_reschedule (int cpu);
-extern void lock_ipi_calllock(void);
-extern void unlock_ipi_calllock(void);
 extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #else /* CONFIG_SMP */
 
 #define cpu_logical_id(i) 0
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 078e1a51a042..c5dd66916692 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -89,6 +89,9 @@ static __inline__ unsigned int num_booting_cpus(void)
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* not __ASSEMBLY__ */
 
 #define NO_PROC_ID (0xff) /* No processor magic marker */
@@ -104,6 +107,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 #define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
 #define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
 #define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+#define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0)
 
 #define IPI_SHIFT (0)
 #define NR_IPIS (8)
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 84fef1aeec0c..0ff5b523ea77 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -35,16 +35,6 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID (-1)
 
-struct call_data_struct {
-	void (*func)(void *);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-extern struct call_data_struct *call_data;
-
 #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
 #define SMP_CALL_FUNCTION 0x2
 
@@ -67,4 +57,7 @@ static inline void smp_send_reschedule(int cpu)
 
 extern asmlinkage void smp_call_function_interrupt(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* __ASM_SMP_H */
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 306f4950e32e..398cdbaf4e54 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -30,6 +30,9 @@ extern cpumask_t cpu_online_map;
 extern void smp_send_reschedule(int cpu);
 extern void smp_send_all_nop(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* !ASSEMBLY */
 
 /*
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 505f35bacaa9..c663a1fa77c5 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
  * in /proc/interrupts will be wrong!!! --Troy */
 #define PPC_MSG_CALL_FUNCTION 0
 #define PPC_MSG_RESCHEDULE 1
-/* This is unused now */
-#if 0
-#define PPC_MSG_MIGRATE_TASK 2
-#endif
+#define PPC_MSG_CALL_FUNC_SINGLE 2
 #define PPC_MSG_DEBUGGER_BREAK 3
 
 void smp_init_iSeries(void);
@@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void);
 
 extern struct smp_ops_t *smp_ops;
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h
index 9c8d34b07ebf..593343cd26ee 100644
--- a/include/asm-sh/smp.h
+++ b/include/asm-sh/smp.h
@@ -26,18 +26,10 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID (-1)
 
-struct smp_fn_call_struct {
-	spinlock_t lock;
-	atomic_t finished;
-	void (*fn)(void *);
-	void *data;
-};
-
-extern struct smp_fn_call_struct smp_fn_call;
-
 #define SMP_MSG_FUNCTION 0
 #define SMP_MSG_RESCHEDULE 1
-#define SMP_MSG_NR 2
+#define SMP_MSG_FUNCTION_SINGLE 2
+#define SMP_MSG_NR 3
 
 void plat_smp_setup(void);
 void plat_prepare_cpus(unsigned int max_cpus);
@@ -46,6 +38,8 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
 void plat_send_ipi(unsigned int cpu, unsigned int message);
 int plat_register_ipi_handler(unsigned int message,
 			      void (*handler)(void *), void *arg);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
 
 #else
 
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index e6d561599726..b61e74bea06a 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		       unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	xc1((smpfunc_t)func, (unsigned long)info);
 	return 0;
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 18f067c310f7..77ba51df5668 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -48,6 +48,7 @@ extern void irq_move_cleanup_interrupt(void);
 extern void threshold_interrupt(void);
 
 extern void call_function_interrupt(void);
+extern void call_function_single_interrupt(void);
 
 /* PIC specific functions */
 extern void disable_8259A_irq(unsigned int irq);
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 0ac864ef3cd4..90b1d1f12f08 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -64,6 +64,7 @@
 # define INVALIDATE_TLB_VECTOR 0xfd
 # define RESCHEDULE_VECTOR 0xfc
 # define CALL_FUNCTION_VECTOR 0xfb
+# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
 # define THERMAL_APIC_VECTOR 0xf0
 
 #else
@@ -72,6 +73,7 @@
 #define ERROR_APIC_VECTOR 0xfe
 #define RESCHEDULE_VECTOR 0xfd
 #define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
 #define THERMAL_APIC_VECTOR 0xfa
 #define THRESHOLD_APIC_VECTOR 0xf9
 #define INVALIDATE_TLB_VECTOR_END 0xf7
@@ -143,6 +145,7 @@
 #define VIC_RESCHEDULE_CPI 4
 #define VIC_ENABLE_IRQ_CPI 5
 #define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
 
 /* Now the QIC CPIs: Since we don't need the two initial levels,
  * these are 2 less than the VIC CPIs */
@@ -152,9 +155,10 @@
 #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
 #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
 #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
 
 #define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
 
 /* this is the SYS_INT CPI. */
 #define VIC_SYS_INT 8
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc861469bdba..9283b60a1dd2 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif
 
 /*
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
index b183fa6d83d9..86be554342d4 100644
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ b/include/asm-x86/mach-visws/entry_arch.h
@@ -1,23 +1,5 @@
 /*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
+ * VISWS uses the standard Linux entry points:
  */
-#ifdef CONFIG_X86_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-#endif
 
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#endif
+#include "../mach-default/entry_arch.h"
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8c10b6..ae52624b5937 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
 BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
 BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
 BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 2e221f1ce0b2..c2784b3e0b77 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -50,9 +50,9 @@ struct smp_ops {
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
-	int (*smp_call_function_mask)(cpumask_t mask,
-				      void (*func)(void *info), void *info,
-				      int wait);
+
+	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_single_ipi)(int cpu);
 };
 
 /* Globals due to paravirt */
@@ -94,17 +94,22 @@ static inline void smp_send_reschedule(int cpu)
 	smp_ops.smp_send_reschedule(cpu);
 }
 
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -197,7 +202,5 @@ static inline int hard_smp_processor_id(void)
 extern void cpu_uninit(void);
 #endif
 
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
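[Note: the asm-x86/smp.h hunks above replace the smp_call_function_mask() hook in struct smp_ops with two IPI-sending hooks, which the inline arch_send_call_function_ipi()/arch_send_call_function_single_ipi() wrappers dispatch through. A minimal sketch of how a backend might fill in just the two new hooks, using the native_* helpers declared in the same hunk; the initializer name is hypothetical and the remaining smp_ops members are omitted here, so this is illustrative only, not code from this patch.]

/* Illustrative sketch only: wiring the two new smp_ops hooks.
 * "example_smp_ops" is a hypothetical name; a real backend also
 * fills in smp_send_stop, smp_send_reschedule, etc. */
static struct smp_ops example_smp_ops = {
	.send_call_func_ipi		= native_send_call_func_ipi,
	.send_call_func_single_ipi	= native_send_call_func_single_ipi,
};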
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 596312a7bfc9..f8d57ea1f05f 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -4,6 +4,7 @@
 enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
+	XEN_CALL_FUNCTION_SINGLE_VECTOR,
 
 	XEN_NR_IPIS,
 };
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232ccf9cfd..48262f86c969 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,18 @@
  */
 
 #include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
 
 extern void cpu_idle(void);
 
+struct call_single_data {
+	struct list_head list;
+	void (*func) (void *info);
+	void *info;
+	unsigned int flags;
+};
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+				int wait);
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int retry, int wait);
+				int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void ipi_call_lock(void);
+void ipi_call_unlock(void);
+void ipi_call_lock_irq(void);
+void ipi_call_unlock_irq(void);
+#else
+static inline void init_call_single_data(void)
+{
+}
+#endif
 
 /*
  * Call a function on all processors
 */
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
 
 #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
 #define MSG_ALL 0x8001
@@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 {
 	return 0;
 }
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,retry,wait) \
+#define on_each_cpu(func,info,wait) \
 	({ \
 		local_irq_disable(); \
 		func(info); \
@@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus() 1
 #define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
 	({ \
 		WARN_ON(cpuid != 0); \
 		local_irq_disable(); \
@@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { }
 	})
 #define smp_call_function_mask(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
-
+static inline void init_call_single_data(void)
+{
+}
 #endif /* !SMP */
 
 /*
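[Note: the include/linux/smp.h hunks above drop the unused retry argument from smp_call_function(), smp_call_function_single() and on_each_cpu(). A hypothetical caller, shown only to illustrate the signature change; the function names below are made up for the example and do not come from this patch.]

#include <linux/smp.h>

/* Hypothetical per-CPU callback, for illustration of the API change only. */
static void example_do_flush(void *info)
{
	/* ... per-CPU work ... */
}

static void example_callers(int cpu)
{
	/* Old calling convention (retry argument present):
	 *   smp_call_function(example_do_flush, NULL, 0, 1);
	 *   smp_call_function_single(cpu, example_do_flush, NULL, 0, 1);
	 *   on_each_cpu(example_do_flush, NULL, 0, 1);
	 * New calling convention after this series (retry dropped, wait kept):
	 */
	smp_call_function(example_do_flush, NULL, 1);
	smp_call_function_single(cpu, example_do_flush, NULL, 1);
	on_each_cpu(example_do_flush, NULL, 1);
}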