Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--  arch/x86/xen/smp.c  143
1 file changed, 54 insertions(+), 89 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 94e69000f982..233156f39b7f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,28 +35,15 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
-static DEFINE_PER_CPU(int, debug_irq) = -1;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
+cpumask_t xen_cpu_initialized_map;
 
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-
-static struct call_data_struct *call_data;
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back. Nothing to do,
@@ -65,6 +52,12 @@ static struct call_data_struct *call_data;
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
+#ifdef CONFIG_X86_32
+	__get_cpu_var(irq_stat).irq_resched_count++;
+#else
+	add_pda(irq_resched_count, 1);
+#endif
+
 	return IRQ_HANDLED;
 }
 
@@ -122,6 +115,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(debug_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+				    cpu,
+				    xen_call_function_single_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(callfuncsingle_irq, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -131,6 +135,9 @@ static int xen_smp_intr_init(unsigned int cpu)
 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
 	if (per_cpu(debug_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
 	return rc;
 }
 
@@ -330,7 +337,7 @@ static void stop_self(void *v)
 
 void xen_smp_send_stop(void)
 {
-	smp_call_function(stop_self, NULL, 0, 0);
+	smp_call_function(stop_self, NULL, 0);
 }
 
 void xen_smp_send_reschedule(int cpu)
@@ -338,7 +345,6 @@ void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
 	unsigned cpu;
@@ -349,83 +355,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 		xen_send_IPI_one(cpu, vector);
 }
 
+void xen_smp_send_call_function_ipi(cpumask_t mask)
+{
+	int cpu;
+
+	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+	/* Make sure other vcpus get a chance to run if they need to. */
+	for_each_cpu_mask(cpu, mask) {
+		if (xen_vcpu_stolen(cpu)) {
+			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+			break;
+		}
+	}
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 
-	if (wait) {
-		mb();		/* commit everything before setting finished */
-		atomic_inc(&call_data->finished);
-	}
-
 	return IRQ_HANDLED;
 }
 
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-			       void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-	struct call_data_struct data;
-	int cpus, cpu;
-	bool yield;
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();			/* write everything before IPI */
-
-	/* Send a message to other CPUs and wait for them to respond */
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-	/* Make sure other vcpus get a chance to run if they need to. */
-	yield = false;
-	for_each_cpu_mask(cpu, mask)
-		if (xen_vcpu_stolen(cpu))
-			yield = true;
-
-	if (yield)
-		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus ||
-	       (wait && atomic_read(&data.finished) != cpus))
-		cpu_relax();
-
-	spin_unlock(&call_lock);
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	__get_cpu_var(irq_stat).irq_call_count++;
+	irq_exit();
 
-	return 0;
+	return IRQ_HANDLED;
 }
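
Note on the interface shift shown above: the open-coded call_data_struct/call_lock machinery is dropped in favour of the kernel's generic SMP call-function helpers, and XEN_CALL_FUNCTION_SINGLE_VECTOR is routed to generic_smp_call_function_single_interrupt(). The sketch below only illustrates how callers look against the changed smp_call_function() signature; the wrapper names report_cpu and example_usage are hypothetical and not part of this patch.

#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical callback: runs on the target CPU(s) out of the
 * call-function IPI handler. */
static void report_cpu(void *info)
{
	pr_info("call-function IPI on CPU %d\n", smp_processor_id());
}

static void example_usage(void)
{
	/* Three-argument form: func, info, wait. The old "nonatomic"
	 * argument (compare the xen_smp_send_stop() hunk) is gone. */
	smp_call_function(report_cpu, NULL, 1);

	/* Single-CPU calls go through the generic single-call path,
	 * which xen_call_function_single_interrupt() now services. */
	smp_call_function_single(0, report_cpu, NULL, 1);
}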