path: root/arch/x86/xen/smp.c
Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r--  arch/x86/xen/smp.c  112
1 file changed, 108 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3700945ed0d5..afb250d22a6b 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/irq_work.h>
 
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map;
 static DEFINE_PER_CPU(int, xen_resched_irq);
 static DEFINE_PER_CPU(int, xen_callfunc_irq);
 static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_irq_work);
 static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back.
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+				    cpu,
+				    xen_irq_work_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(xen_irq_work, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
 				       NULL);
+	if (per_cpu(xen_irq_work, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 
 	return rc;
 }
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu)
 	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(const struct cpumask *mask,
-			      enum ipi_vector vector)
+static void __xen_send_IPI_mask(const struct cpumask *mask,
+			      int vector)
 {
 	unsigned cpu;
 
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
 	for_each_cpu(cpu, mask) {
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(cpumask_of(cpu),
+	__xen_send_IPI_mask(cpumask_of(cpu),
 			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
+static inline int xen_map_vector(int vector)
+{
+	int xen_vector;
+
+	switch (vector) {
+	case RESCHEDULE_VECTOR:
+		xen_vector = XEN_RESCHEDULE_VECTOR;
+		break;
+	case CALL_FUNCTION_VECTOR:
+		xen_vector = XEN_CALL_FUNCTION_VECTOR;
+		break;
+	case CALL_FUNCTION_SINGLE_VECTOR:
+		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
+		break;
+	case IRQ_WORK_VECTOR:
+		xen_vector = XEN_IRQ_WORK_VECTOR;
+		break;
+	default:
+		xen_vector = -1;
+		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
+			vector);
+	}
+
+	return xen_vector;
+}
+
+void xen_send_IPI_mask(const struct cpumask *mask,
+			      int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		__xen_send_IPI_mask(mask, xen_vector);
+}
+
+void xen_send_IPI_all(int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		xen_send_IPI_one(smp_processor_id(), xen_vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+				int vector)
+{
+	unsigned cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	if (!(num_online_cpus() > 1))
+		return;
+
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+		if (this_cpu == cpu)
+			continue;
+
+		xen_smp_send_call_function_single_ipi(cpu);
+	}
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
+{
+	irq_enter();
+	irq_work_run();
+	inc_irq_stat(apic_irq_work_irqs);
+	irq_exit();
+
+	return IRQ_HANDLED;
+}
+
 static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 	native_cpu_die(cpu);
 }
 
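For context, a minimal sketch (not part of this commit) of how the new path gets exercised: irq_work_queue() marks a work item pending and raises IRQ_WORK_VECTOR on the local CPU; with the mapping added in xen_map_vector(), that becomes XEN_IRQ_WORK_VECTOR, delivered as an event-channel IPI, so the callback runs from xen_irq_work_interrupt() via irq_work_run(). The function and variable names below are illustrative only, and the sketch assumes the xen_send_IPI_* helpers above are wired into the apic IPI hooks elsewhere (not shown in this file).

/* Hypothetical caller of the generic irq_work API; names are made up. */
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/smp.h>

static void example_irq_work_func(struct irq_work *work)
{
	/* Runs in hard-irq context, from irq_work_run() in the handler above. */
	pr_info("irq_work callback ran on cpu %d\n", smp_processor_id());
}

static struct irq_work example_work;

static void example_kick(void)
{
	/* One-time init would normally live in setup code. */
	init_irq_work(&example_work, example_irq_work_func);

	/* Raises IRQ_WORK_VECTOR on the local CPU; returns false if
	 * the work was already pending. */
	irq_work_queue(&example_work);
}

One quirk worth noticing in the version added here: xen_send_IPI_mask_allbutself() ignores the vector it was given and sends the call-function-single IPI to every remote CPU, and xen_send_IPI_allbutself() passes it an already-mapped Xen vector that would be mapped again; later kernels reworked these to send the requested vector directly.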