author    Jens Axboe <jens.axboe@oracle.com>    2008-06-26 05:21:54 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2008-06-26 05:21:54 -0400
commit    3b16cf874861436725c43ba0b68bdd799297be7c (patch)
tree      8e48647e3dce5dde6917f260f93c4b9f19945c55
parent    3d4422332711ef48ef0f132f1fcbfcbd56c7f3d1 (diff)
x86: convert to generic helpers for IPI function calls
This converts x86, x86-64, and xen to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().

Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--  arch/x86/Kconfig                             |   1
-rw-r--r--  arch/x86/kernel/apic_32.c                    |   4
-rw-r--r--  arch/x86/kernel/entry_64.S                   |   3
-rw-r--r--  arch/x86/kernel/i8259_64.c                   |   4
-rw-r--r--  arch/x86/kernel/smp.c                        | 158
-rw-r--r--  arch/x86/kernel/smpboot.c                    |   4
-rw-r--r--  arch/x86/kernel/smpcommon.c                  |  56
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c          |  94
-rw-r--r--  arch/x86/xen/enlighten.c                     |   4
-rw-r--r--  arch/x86/xen/mmu.c                           |   2
-rw-r--r--  arch/x86/xen/smp.c                           | 133
-rw-r--r--  arch/x86/xen/xen-ops.h                       |   9
-rw-r--r--  include/asm-x86/hw_irq_32.h                  |   1
-rw-r--r--  include/asm-x86/hw_irq_64.h                  |   2
-rw-r--r--  include/asm-x86/mach-default/entry_arch.h    |   1
-rw-r--r--  include/asm-x86/mach-default/irq_vectors.h   |   1
-rw-r--r--  include/asm-x86/mach-voyager/entry_arch.h    |   2
-rw-r--r--  include/asm-x86/mach-voyager/irq_vectors.h   |   4
-rw-r--r--  include/asm-x86/smp.h                        |  21
-rw-r--r--  include/asm-x86/xen/events.h                 |   1
20 files changed, 125 insertions, 380 deletions
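
For orientation before the patch body: after this conversion the generic code in kernel/smp.c (introduced by the parent commit, 3d442233) owns the call queueing and the started/finished bookkeeping that each architecture used to duplicate in a private call_data_struct. The architecture now only supplies the two new smp_ops hooks that send the IPIs (send_call_func_ipi, send_call_func_single_ipi) plus interrupt handlers that call back into generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt(). The fragment below is a deliberately simplified, single-slot model of the single-CPU path, not the real kernel/smp.c implementation (which uses per-CPU queues, the real signature of this era also carries a nonatomic argument, and there is proper locking); it only illustrates who does what.

/*
 * Simplified, single-slot model of the generic single-CPU path -- NOT the
 * real kernel/smp.c code.  Shows only the generic/arch division of labor.
 */
struct call_single_data {
	void (*func)(void *info);	/* function to run on the target CPU */
	void *info;			/* argument handed to func */
	volatile int done;		/* set by the target CPU when func has run */
};

static struct call_single_data pending[NR_CPUS];	/* hypothetical storage */

/* Generic side (caller): queue the request, then let the arch send the IPI. */
int smp_call_function_single(int cpu, void (*func)(void *), void *info, int wait)
{
	struct call_single_data *csd = &pending[cpu];

	csd->func = func;
	csd->info = info;
	csd->done = 0;

	/* ends up in smp_ops.send_call_func_single_ipi(), added by this patch */
	arch_send_call_function_single_ipi(cpu);

	if (wait)
		while (!csd->done)
			cpu_relax();
	return 0;
}

/* Generic side (target CPU): invoked from the arch IPI handler, e.g. the new
 * smp_call_function_single_interrupt() in arch/x86/kernel/smp.c below. */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_data *csd = &pending[smp_processor_id()];

	csd->func(csd->info);
	csd->done = 1;		/* releases a waiting caller */
}

With that split, the per-architecture copies of call_data_struct, call_lock and the wait loops removed below become redundant, which is where most of the 380 deleted lines come from.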
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e0edaaa6920a..2f3fbebf51d8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -168,6 +168,7 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
+	select USE_GENERIC_SMP_HELPERS
 	default y
 
 config X86_32_SMP
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 4b99b1bdeb6c..71017f71f4bc 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1358,6 +1358,10 @@ void __init smp_intr_init(void)
 
 	/* IPI for generic function call */
 	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+	/* IPI for single call function */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+				call_function_single_interrupt);
 }
 #endif
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 556a8df522a7..6d1fe270a96d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -711,6 +711,9 @@ END(invalidate_interrupt\num)
 ENTRY(call_function_interrupt)
 	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
 	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index fa57a1568508..00d2ccdc69f8 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -494,6 +494,10 @@ void __init native_init_IRQ(void)
 	/* IPI for generic function call */
 	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
+	/* IPI for generic single function call */
+	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+			call_function_single_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 0cb7aadc87cd..575aa3d7248a 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu)
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-	spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-			      void (*func)(void *), void *info,
-			      int wait)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
 	cpumask_t allbutself;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
 
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-
-	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself) &&
 	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	spin_unlock(&call_lock);
-
-	return 0;
 }
 
 static void stop_this_cpu(void *dummy)
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy)
 
 static void native_smp_send_stop(void)
 {
-	int nolock;
 	unsigned long flags;
 
 	if (reboot_force)
 		return;
 
-	/* Don't deadlock on the call lock in panic */
-	nolock = !spin_trylock(&call_lock);
+	smp_call_function(stop_this_cpu, NULL, 0, 0);
 	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
-	if (!nolock)
-		spin_unlock(&call_lock);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
 	ack_APIC_irq();
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 #ifdef CONFIG_X86_32
 	__get_cpu_var(irq_stat).irq_call_count++;
 #else
 	add_pda(irq_call_count, 1);
 #endif
 	irq_exit();
+}
 
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
+void smp_call_function_single_interrupt(void)
+{
+	ack_APIC_irq();
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_call_count++;
+#else
+	add_pda(irq_call_count, 1);
+#endif
+	irq_exit();
 }
 
 struct smp_ops smp_ops = {
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = native_smp_send_stop,
 	.smp_send_reschedule = native_smp_send_reschedule,
-	.smp_call_function_mask = native_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
-
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 56078d61c793..89647898f546 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -345,7 +345,7 @@ static void __cpuinit start_secondary(void *unused)
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
 	 */
-	lock_ipi_call_lock();
+	ipi_call_lock_irq();
 #ifdef CONFIG_X86_64
 	spin_lock(&vector_lock);
 
@@ -357,7 +357,7 @@ static void __cpuinit start_secondary(void *unused)
 	spin_unlock(&vector_lock);
 #endif
 	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
+	ipi_call_unlock_irq();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
 	setup_secondary_clock();
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 3449064d141a..99941b37eca0 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
 	per_cpu(cpu_number, cpu) = cpu;
 }
 #endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 8acbf0cdf1a5..cb34407a9930 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -955,94 +955,24 @@ static void smp_stop_cpu_function(void *dummy)
 	halt();
 }
 
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	volatile unsigned long started;
-	volatile unsigned long finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
 /* execute a thread on a new CPU.  The function to be called must be
  * previously set up.  This is used to schedule a function for
  * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	/* must take copy of wait because call_data may be replaced
-	 * unless the function is waiting for us to finish */
-	int wait = call_data->wait;
-	__u8 cpu = smp_processor_id();
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	if (!test_and_clear_bit(cpu, &call_data->started)) {
-		/* If the bit wasn't set, this could be a replay */
-		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-		       " with no call pending\n", cpu);
-		return;
-	}
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func) (info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
-	if (wait) {
-		mb();
-		clear_bit(cpu, &call_data->finished);
-	}
 }
 
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
-			       void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
 {
-	struct call_data_struct data;
-	u32 mask = cpus_addr(cpumask)[0];
-
-	mask &= ~(1 << smp_processor_id());
-
-	if (!mask)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.started = mask;
-	data.wait = wait;
-	if (wait)
-		data.finished = mask;
-
-	spin_lock(&call_lock);
-	call_data = &data;
-	wmb();
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
-	/* Wait for response */
-	while (data.started)
-		barrier();
-
-	if (wait)
-		while (data.finished)
-			barrier();
-
-	spin_unlock(&call_lock);
-
-	return 0;
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 }
 
 /* Sorry about the name.  In an APIC based system, the APICs
@@ -1099,6 +1029,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
 	smp_call_function_interrupt();
 }
 
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+	ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+	smp_call_function_single_interrupt();
+}
+
 void smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1119,6 +1055,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
 		smp_enable_irq_interrupt();
 	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
 		smp_call_function_interrupt();
+	if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+		smp_call_function_single_interrupt();
 	set_irq_regs(old_regs);
 }
 
@@ -1862,5 +1800,7 @@ struct smp_ops smp_ops = {
 
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
-	.smp_call_function_mask = voyager_smp_call_function_mask,
+
+	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f09c1c69c37a..8e317782fe37 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1108,7 +1108,9 @@ static const struct smp_ops xen_smp_ops __initdata = {
 
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
-	.smp_call_function_mask = xen_smp_call_function_mask,
+
+	.send_call_func_ipi = xen_smp_send_call_function_ipi,
+	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
 };
 #endif	/* CONFIG_SMP */
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index df40bf74ea75..5c01590380bc 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -558,7 +558,7 @@ static void drop_mm_ref(struct mm_struct *mm)
 	}
 
 	if (!cpus_empty(mask))
-		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
 static void drop_mm_ref(struct mm_struct *mm)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..b3786e749b8e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -36,27 +36,14 @@
 #include "mmu.h"
 
 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
-static DEFINE_PER_CPU(int, debug_irq) = -1;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
 
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-
-static struct call_data_struct *call_data;
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back. Nothing to do,
@@ -122,6 +109,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(debug_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+				    cpu,
+				    xen_call_function_single_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfuncsingle_irq, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -131,6 +129,9 @@ static int xen_smp_intr_init(unsigned int cpu)
 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
 	if (per_cpu(debug_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
 	return rc;
 }
 
@@ -338,7 +339,6 @@ void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
 	unsigned cpu;
@@ -349,83 +349,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
+void xen_smp_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run if they need to. */
+	for_each_cpu_mask(cpu, mask) {
+		if (xen_vcpu_stolen(cpu)) {
+			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+			break;
+		}
+	}
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 
-	if (wait) {
-		mb(); /* commit everything before setting finished */
-		atomic_inc(&call_data->finished);
-	}
-
 	return IRQ_HANDLED;
 }
 
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-	struct call_data_struct data;
-	int cpus, cpu;
-	bool yield;
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb(); /* write everything before IPI */
-
-	/* Send a message to other CPUs and wait for them to respond */
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-	/* Make sure other vcpus get a chance to run if they need to. */
-	yield = false;
-	for_each_cpu_mask(cpu, mask)
-		if (xen_vcpu_stolen(cpu))
-			yield = true;
-
-	if (yield)
-		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus ||
-	       (wait && atomic_read(&data.finished) != cpus))
-		cpu_relax();
-
-	spin_unlock(&call_lock);
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 
-	return 0;
+	return IRQ_HANDLED;
 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index f1063ae08037..a636ab5e1341 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -46,13 +46,8 @@ void xen_smp_cpus_done(unsigned int max_cpus);
 
 void xen_smp_send_stop(void);
 void xen_smp_send_reschedule(int cpu);
-int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			   int wait);
-int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-				 int nonatomic, int wait);
-
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait);
+void xen_smp_send_call_function_ipi(cpumask_t mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
 
 
 /* Declare an asm function, along with symbols needed to make it
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
index ea88054e03f3..a87b1320c78f 100644
--- a/include/asm-x86/hw_irq_32.h
+++ b/include/asm-x86/hw_irq_32.h
@@ -32,6 +32,7 @@ extern void (*const interrupt[NR_IRQS])(void);
 void reschedule_interrupt(void);
 void invalidate_interrupt(void);
 void call_function_interrupt(void);
+void call_function_single_interrupt(void);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
index 0062ef390f67..fe657812d4df 100644
--- a/include/asm-x86/hw_irq_64.h
+++ b/include/asm-x86/hw_irq_64.h
@@ -68,6 +68,7 @@
 #define ERROR_APIC_VECTOR	0xfe
 #define RESCHEDULE_VECTOR	0xfd
 #define CALL_FUNCTION_VECTOR	0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR	0xfb
 /* fb free - please don't readd KDB here because it's useless
    (hint - think what a NMI bit does to a vector) */
 #define THERMAL_APIC_VECTOR	0xfa
@@ -102,6 +103,7 @@ void spurious_interrupt(void);
 void error_interrupt(void);
 void reschedule_interrupt(void);
 void call_function_interrupt(void);
+void call_function_single_interrupt(void);
 void irq_move_cleanup_interrupt(void);
 void invalidate_interrupt0(void);
 void invalidate_interrupt1(void);
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc861469bdba..9283b60a1dd2 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif
 
 /*
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h
index 881c63ca61ad..ed7d4955c653 100644
--- a/include/asm-x86/mach-default/irq_vectors.h
+++ b/include/asm-x86/mach-default/irq_vectors.h
@@ -48,6 +48,7 @@
 #define INVALIDATE_TLB_VECTOR	0xfd
 #define RESCHEDULE_VECTOR	0xfc
 #define CALL_FUNCTION_VECTOR	0xfb
+#define CALL_FUNCTION_SINGLE_VECTOR	0xfa
 
 #define THERMAL_APIC_VECTOR	0xf0
 /*
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8c10b6..ae52624b5937 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
 BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
 BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
 BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h
index 165421f5821c..fda57ad37b5d 100644
--- a/include/asm-x86/mach-voyager/irq_vectors.h
+++ b/include/asm-x86/mach-voyager/irq_vectors.h
@@ -33,6 +33,7 @@
 #define VIC_RESCHEDULE_CPI		4
 #define VIC_ENABLE_IRQ_CPI		5
 #define VIC_CALL_FUNCTION_CPI		6
+#define VIC_CALL_FUNCTION_SINGLE_CPI	7
 
 /* Now the QIC CPIs:  Since we don't need the two initial levels,
  * these are 2 less than the VIC CPIs */
@@ -42,9 +43,10 @@
 #define QIC_RESCHEDULE_CPI		(VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
 #define QIC_ENABLE_IRQ_CPI		(VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
 #define QIC_CALL_FUNCTION_CPI		(VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI	(VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
 
 #define VIC_START_FAKE_CPI		VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI		VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI		VIC_CALL_FUNCTION_SINGLE_CPI
 
 /* this is the SYS_INT CPI. */
 #define VIC_SYS_INT			8
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 1ebaa5cd3112..e3c24807b59b 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -59,9 +59,9 @@ struct smp_ops {
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
-	int (*smp_call_function_mask)(cpumask_t mask,
-				      void (*func)(void *info), void *info,
-				      int wait);
+
+	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_single_ipi)(int cpu);
 };
 
 /* Globals due to paravirt */
@@ -103,17 +103,22 @@ static inline void smp_send_reschedule(int cpu)
 	smp_ops.smp_send_reschedule(cpu);
 }
 
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -202,7 +207,5 @@ extern void cpu_uninit(void);
 #endif
 
 extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 596312a7bfc9..f8d57ea1f05f 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -4,6 +4,7 @@
 enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
+	XEN_CALL_FUNCTION_SINGLE_VECTOR,
 
 	XEN_NR_IPIS,
 };