author	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:21:54 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:21:54 -0400
commit	3b16cf874861436725c43ba0b68bdd799297be7c (patch)
tree	8e48647e3dce5dde6917f260f93c4b9f19945c55 /arch/x86/xen/smp.c
parent	3d4422332711ef48ef0f132f1fcbfcbd56c7f3d1 (diff)
x86: convert to generic helpers for IPI function calls
This converts x86, x86-64, and xen to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().

Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--	arch/x86/xen/smp.c	133
1 file changed, 46 insertions(+), 87 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..b3786e749b8e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -36,27 +36,14 @@
 #include "mmu.h"
 
 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
-static DEFINE_PER_CPU(int, debug_irq) = -1;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
 
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-
-static struct call_data_struct *call_data;
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back. Nothing to do,
@@ -122,6 +109,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(debug_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+				    cpu,
+				    xen_call_function_single_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfuncsingle_irq, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -131,6 +129,9 @@ static int xen_smp_intr_init(unsigned int cpu)
 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
 	if (per_cpu(debug_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
 	return rc;
 }
 
@@ -338,7 +339,6 @@ void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
 	unsigned cpu;
@@ -349,83 +349,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
+void xen_smp_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run if they need to. */
+	for_each_cpu_mask(cpu, mask) {
+		if (xen_vcpu_stolen(cpu)) {
+			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+			break;
+		}
+	}
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 
-	if (wait) {
-		mb();		/* commit everything before setting finished */
-		atomic_inc(&call_data->finished);
-	}
-
 	return IRQ_HANDLED;
 }
 
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-	struct call_data_struct data;
-	int cpus, cpu;
-	bool yield;
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();			/* write everything before IPI */
-
-	/* Send a message to other CPUs and wait for them to respond */
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-	/* Make sure other vcpus get a chance to run if they need to. */
-	yield = false;
-	for_each_cpu_mask(cpu, mask)
-		if (xen_vcpu_stolen(cpu))
-			yield = true;
-
-	if (yield)
-		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus ||
-	       (wait && atomic_read(&data.finished) != cpus))
-		cpu_relax();
-
-	spin_unlock(&call_lock);
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 
-	return 0;
+	return IRQ_HANDLED;
 }
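Note (not part of the patch): the effect of this conversion is that Xen now services the ordinary smp_call_function_single() / smp_call_function() APIs through the generic code in kernel/smp.c; the Xen vectors only deliver the IPI, and the handlers above call back into the generic helpers. A minimal caller is sketched below as an illustration; demo_hello(), demo(), and demo_cpu are hypothetical names, and it assumes the post-conversion four-argument smp_call_function_single() signature provided by the generic helpers.

#include <linux/smp.h>
#include <linux/kernel.h>

/* Runs on the target CPU; on Xen it is reached from
 * xen_call_function_single_interrupt(), which now just calls
 * generic_smp_call_function_single_interrupt(). */
static void demo_hello(void *info)
{
	printk(KERN_INFO "hello from cpu %d\n", smp_processor_id());
}

static void demo(void)
{
	int demo_cpu = 1;	/* hypothetical target CPU */

	/* wait=1: the generic helper spins until the target CPU has run
	 * demo_hello(), replacing the hand-rolled call_data_struct
	 * started/finished accounting that this patch deletes. */
	smp_call_function_single(demo_cpu, demo_hello, NULL, 1);
}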