Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--  drivers/xen/events.c | 207
1 file changed, 24 insertions(+), 183 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0be4df39e95..44490de5aec 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -31,16 +31,13 @@
 #include <linux/irqnr.h>
 #include <linux/pci.h>
 
-#ifdef CONFIG_X86
 #include <asm/desc.h>
 #include <asm/ptrace.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
 #include <asm/io_apic.h>
-#include <asm/xen/page.h>
-#include <asm/xen/pci.h>
-#endif
 #include <asm/sync_bitops.h>
+#include <asm/xen/pci.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
@@ -52,9 +49,6 @@
 #include <xen/interface/event_channel.h>
 #include <xen/interface/hvm/hvm_op.h>
 #include <xen/interface/hvm/params.h>
-#include <xen/interface/physdev.h>
-#include <xen/interface/sched.h>
-#include <asm/hw_irq.h>
 
 /*
  * This lock protects updates to the following mapping and reference-count
@@ -91,9 +85,9 @@ enum xen_irq_type {
  * IPI - IPI vector
  * EVTCHN -
  */
-struct irq_info {
+struct irq_info
+{
        struct list_head list;
-       int refcnt;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned short evtchn;  /* event channel */
@@ -115,10 +109,6 @@ struct irq_info {
 #define PIRQ_SHAREABLE (1 << 1)
 
 static int *evtchn_to_irq;
-#ifdef CONFIG_X86
-static unsigned long *pirq_eoi_map;
-#endif
-static bool (*pirq_needs_eoi)(unsigned irq);
 
 static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
                      cpu_evtchn_mask);
@@ -279,16 +269,10 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
        return ret;
 }
 
-#ifdef CONFIG_X86
-static bool pirq_check_eoi_map(unsigned irq)
-{
-       return test_bit(pirq_from_irq(irq), pirq_eoi_map);
-}
-#endif
-
-static bool pirq_needs_eoi_flag(unsigned irq)
-{
+static bool pirq_needs_eoi(unsigned irq)
+{
        struct irq_info *info = info_for_irq(irq);
+
        BUG_ON(info->type != IRQT_PIRQ);
 
        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
@@ -298,9 +282,9 @@ static inline unsigned long active_evtchns(unsigned int cpu,
                                                 struct shared_info *sh,
                                                 unsigned int idx)
 {
-       return sh->evtchn_pending[idx] &
+       return (sh->evtchn_pending[idx] &
               per_cpu(cpu_evtchn_mask, cpu)[idx] &
-              ~sh->evtchn_mask[idx];
+              ~sh->evtchn_mask[idx]);
 }
 
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
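[Editor's note: to see what active_evtchns() computes, here is a minimal userspace sketch of the same three-way bit intersection — pending, bound to this CPU, and not globally masked. The array sizes and sample values are hypothetical, not the kernel's.]

/* Userspace model of active_evtchns(): an event channel is deliverable
 * on a CPU iff it is pending, bound to that CPU, and not masked.
 * All names and sizes here are illustrative, not the kernel's. */
#include <stdio.h>

#define WORDS 2  /* stands in for NR_EVENT_CHANNELS / BITS_PER_LONG */

static unsigned long evtchn_pending[WORDS]  = { 0xffUL, 0x1UL };
static unsigned long cpu_evtchn_mask[WORDS] = { 0x0fUL, 0x1UL };
static unsigned long evtchn_mask[WORDS]     = { 0x03UL, 0x0UL };

static unsigned long active_evtchns_model(unsigned int idx)
{
        return (evtchn_pending[idx] &
                cpu_evtchn_mask[idx] &
                ~evtchn_mask[idx]);
}

int main(void)
{
        unsigned int idx;

        for (idx = 0; idx < WORDS; idx++)
                printf("word %u: active = %#lx\n", idx, active_evtchns_model(idx));
        /* word 0: pending 0xff, bound 0x0f, masked 0x03 -> active 0x0c */
        return 0;
}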
@@ -382,22 +366,11 @@ static void unmask_evtchn(int port)
 {
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();
-       int do_hypercall = 0, evtchn_pending = 0;
 
        BUG_ON(!irqs_disabled());
 
-       if (unlikely((cpu != cpu_from_evtchn(port))))
-               do_hypercall = 1;
-       else
-               evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
-
-       if (unlikely(evtchn_pending && xen_hvm_domain()))
-               do_hypercall = 1;
-
-       /* Slow path (hypercall) if this is a non-local port or if this is
-        * an hvm domain and an event is pending (hvm domains don't have
-        * their own implementation of irq_enable). */
-       if (do_hypercall) {
+       /* Slow path (hypercall) if this is a non-local port. */
+       if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
@@ -410,7 +383,7 @@ static void unmask_evtchn(int port)
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
-               if (evtchn_pending &&
+               if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
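[Editor's note: the simplified unmask_evtchn() keeps only two cases — a hypercall for ports bound to another CPU, and a direct bit update plus re-raise for local ones. Below is a rough userspace model of that control flow; the hypercall and shared-info state are stubbed, so everything here is an assumption for illustration, not real Xen behavior.]

/* Userspace model of the simplified unmask_evtchn() control flow. */
#include <stdio.h>
#include <stdbool.h>

struct model_state {
        bool pending;           /* s->evtchn_pending bit for the port */
        bool masked;            /* s->evtchn_mask bit for the port */
        bool upcall_pending;    /* vcpu_info->evtchn_upcall_pending */
        unsigned int bound_cpu; /* cpu_from_evtchn(port) */
};

static void hypercall_unmask(struct model_state *s)
{
        /* EVTCHNOP_unmask: the hypervisor clears the mask and re-raises
         * any pending event itself. */
        s->masked = false;
        printf("slow path: EVTCHNOP_unmask hypercall\n");
}

static void unmask_evtchn_model(struct model_state *s, unsigned int this_cpu)
{
        if (this_cpu != s->bound_cpu) {
                hypercall_unmask(s);    /* non-local port: slow path */
                return;
        }
        s->masked = false;              /* local port: clear mask directly */
        if (s->pending) {
                /* the 'edge' was lost while masked; resend by hand */
                s->upcall_pending = true;
                printf("fast path: re-raised pending event\n");
        }
}

int main(void)
{
        struct model_state s = { .pending = true, .masked = true, .bound_cpu = 0 };

        unmask_evtchn_model(&s, 0);     /* fast path */
        unmask_evtchn_model(&s, 1);     /* slow path */
        return 0;
}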
@@ -434,7 +407,6 @@ static void xen_irq_init(unsigned irq)
                panic("Unable to allocate metadata for IRQ%d\n", irq);
 
        info->type = IRQT_UNBOUND;
-       info->refcnt = -1;
 
        irq_set_handler_data(irq, info);
 
@@ -460,8 +432,7 @@ static int __must_check xen_allocate_irq_dynamic(void)
 
        irq = irq_alloc_desc_from(first, -1);
 
-       if (irq >= 0)
-               xen_irq_init(irq);
+       xen_irq_init(irq);
 
        return irq;
 }
@@ -498,8 +469,6 @@ static void xen_free_irq(unsigned irq)
 
        irq_set_handler_data(irq, NULL);
 
-       WARN_ON(info->refcnt > 0);
-
        kfree(info);
 
        /* Legacy IRQ descriptors are managed by the arch. */
@@ -631,7 +600,7 @@ static void disable_pirq(struct irq_data *data)
        disable_dynirq(data);
 }
 
-int xen_irq_from_gsi(unsigned gsi)
+static int find_irq_by_gsi(unsigned gsi)
 {
        struct irq_info *info;
 
@@ -645,7 +614,6 @@ int xen_irq_from_gsi(unsigned gsi)
 
        return -1;
 }
-EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 
 /*
  * Do not make any assumptions regarding the relationship between the
@@ -665,11 +633,11 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 
        mutex_lock(&irq_mapping_update_lock);
 
-       irq = xen_irq_from_gsi(gsi);
+       irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
-               goto out;
+               goto out;       /* XXX need refcount? */
        }
 
        irq = xen_allocate_irq_gsi(gsi);
@@ -745,7 +713,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
        mutex_lock(&irq_mapping_update_lock);
 
        irq = xen_allocate_irq_dynamic();
-       if (irq < 0)
+       if (irq == -1)
                goto out;
 
        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
@@ -761,7 +729,7 @@ out:
 error_irq:
        mutex_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
-       return ret;
+       return -1;
 }
 #endif
 
@@ -811,7 +779,7 @@ int xen_irq_from_pirq(unsigned pirq)
        mutex_lock(&irq_mapping_update_lock);
 
        list_for_each_entry(info, &xen_irq_list_head, list) {
-               if (info->type != IRQT_PIRQ)
+               if (info == NULL || info->type != IRQT_PIRQ)
                        continue;
                irq = info->irq;
                if (info->u.pirq.pirq == pirq)
@@ -847,11 +815,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                                              handle_edge_irq, "event");
 
                xen_irq_info_evtchn_init(irq, evtchn);
-       } else {
-               struct irq_info *info = info_for_irq(irq);
-               WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
-       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
        mutex_unlock(&irq_mapping_update_lock);
@@ -886,9 +850,6 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
 
                bind_evtchn_to_cpu(evtchn, cpu);
-       } else {
-               struct irq_info *info = info_for_irq(irq);
-               WARN_ON(info == NULL || info->type != IRQT_IPI);
        }
 
  out:
@@ -911,32 +872,11 @@ static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
        return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
 }
 
-static int find_virq(unsigned int virq, unsigned int cpu)
-{
-       struct evtchn_status status;
-       int port, rc = -ENOENT;
-
-       memset(&status, 0, sizeof(status));
-       for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
-               status.dom = DOMID_SELF;
-               status.port = port;
-               rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
-               if (rc < 0)
-                       continue;
-               if (status.status != EVTCHNSTAT_virq)
-                       continue;
-               if (status.u.virq == virq && status.vcpu == cpu) {
-                       rc = port;
-                       break;
-               }
-       }
-       return rc;
-}
 
 int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 {
        struct evtchn_bind_virq bind_virq;
-       int evtchn, irq, ret;
+       int evtchn, irq;
 
        mutex_lock(&irq_mapping_update_lock);
 
@@ -952,23 +892,14 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
-               ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
-                                               &bind_virq);
-               if (ret == 0)
-                       evtchn = bind_virq.port;
-               else {
-                       if (ret == -EEXIST)
-                               ret = find_virq(virq, cpu);
-                       BUG_ON(ret < 0);
-                       evtchn = ret;
-               }
+               if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+                                               &bind_virq) != 0)
+                       BUG();
+               evtchn = bind_virq.port;
 
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
 
                bind_evtchn_to_cpu(evtchn, cpu);
-       } else {
-               struct irq_info *info = info_for_irq(irq);
-               WARN_ON(info == NULL || info->type != IRQT_VIRQ);
        }
 
 out:
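[Editor's note: for context, the deleted find_virq() recovered an already-bound port by scanning every channel's EVTCHNOP_status result until it found a VIRQ bound to the requested (virq, cpu) pair; with that fallback gone, bind_virq_to_irq() simply BUG()s if the bind hypercall fails. Below is a userspace sketch of that scan over a stubbed status table — the table, sizes, and values are all made up for illustration.]

/* Userspace sketch of the deleted find_virq() scan: walk all ports,
 * query each one's status, and return the port bound to (virq, cpu).
 * The ports[] table stands in for EVTCHNOP_status. */
#include <stdio.h>

#define MODEL_NR_PORTS 8
enum { STAT_CLOSED, STAT_VIRQ };

struct port_status {
        int status;
        unsigned int virq;
        unsigned int vcpu;
};

static const struct port_status ports[MODEL_NR_PORTS] = {
        [3] = { STAT_VIRQ, /* virq */ 2, /* vcpu */ 1 },
        [5] = { STAT_VIRQ, /* virq */ 4, /* vcpu */ 0 },
};

static int find_virq_model(unsigned int virq, unsigned int cpu)
{
        int port;

        for (port = 0; port < MODEL_NR_PORTS; port++) {
                if (ports[port].status != STAT_VIRQ)
                        continue;
                if (ports[port].virq == virq && ports[port].vcpu == cpu)
                        return port;
        }
        return -1;      /* stands in for -ENOENT */
}

int main(void)
{
        printf("virq 4 on vcpu 0 -> port %d\n", find_virq_model(4, 0)); /* 5 */
        printf("virq 9 on vcpu 0 -> port %d\n", find_virq_model(9, 0)); /* -1 */
        return 0;
}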
@@ -981,16 +912,9 @@ static void unbind_from_irq(unsigned int irq)
 {
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
-       struct irq_info *info = irq_get_handler_data(irq);
 
        mutex_lock(&irq_mapping_update_lock);
 
-       if (info->refcnt > 0) {
-               info->refcnt--;
-               if (info->refcnt != 0)
-                       goto done;
-       }
-
        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
@@ -1019,7 +943,6 @@ static void unbind_from_irq(unsigned int irq)
 
        xen_free_irq(irq);
 
- done:
        mutex_unlock(&irq_mapping_update_lock);
 }
 
@@ -1115,69 +1038,6 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
-int evtchn_make_refcounted(unsigned int evtchn)
-{
-       int irq = evtchn_to_irq[evtchn];
-       struct irq_info *info;
-
-       if (irq == -1)
-               return -ENOENT;
-
-       info = irq_get_handler_data(irq);
-
-       if (!info)
-               return -ENOENT;
-
-       WARN_ON(info->refcnt != -1);
-
-       info->refcnt = 1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
-
-int evtchn_get(unsigned int evtchn)
-{
-       int irq;
-       struct irq_info *info;
-       int err = -ENOENT;
-
-       if (evtchn >= NR_EVENT_CHANNELS)
-               return -EINVAL;
-
-       mutex_lock(&irq_mapping_update_lock);
-
-       irq = evtchn_to_irq[evtchn];
-       if (irq == -1)
-               goto done;
-
-       info = irq_get_handler_data(irq);
-
-       if (!info)
-               goto done;
-
-       err = -EINVAL;
-       if (info->refcnt <= 0)
-               goto done;
-
-       info->refcnt++;
-       err = 0;
- done:
-       mutex_unlock(&irq_mapping_update_lock);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(evtchn_get);
-
-void evtchn_put(unsigned int evtchn)
-{
-       int irq = evtchn_to_irq[evtchn];
-       if (WARN_ON(irq == -1))
-               return;
-       unbind_from_irq(irq);
-}
-EXPORT_SYMBOL_GPL(evtchn_put);
-
 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 {
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
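[Editor's note: the removed trio implemented a small reference-count protocol on top of irq_info.refcnt: -1 means "not refcounted", evtchn_make_refcounted() arms it at 1, evtchn_get()/evtchn_put() then bump and drop it, and the drop to zero tears the IRQ down via unbind_from_irq(). A compact userspace model of that state machine follows; the names and the teardown stub are hypothetical.]

/* Userspace model of the removed evtchn refcounting protocol.
 * refcnt == -1: channel not refcounted; get/put are invalid.
 * make_refcounted(): -1 -> 1; get(): ++ if > 0; put(): -- and
 * tear down on reaching 0.  Teardown is just a printf stub here. */
#include <stdio.h>

struct model_info {
        int refcnt;
};

static void model_unbind(struct model_info *info)
{
        printf("refcnt hit 0: unbind_from_irq() would run here\n");
        info->refcnt = -1;
}

static int model_make_refcounted(struct model_info *info)
{
        if (info->refcnt != -1)
                return -1;      /* already managed; the kernel WARN_ON'd this */
        info->refcnt = 1;
        return 0;
}

static int model_get(struct model_info *info)
{
        if (info->refcnt <= 0)
                return -1;      /* stands in for -EINVAL */
        info->refcnt++;
        return 0;
}

static void model_put(struct model_info *info)
{
        /* only valid while the channel is managed (refcnt > 0) */
        if (--info->refcnt == 0)
                model_unbind(info);
}

int main(void)
{
        struct model_info info = { .refcnt = -1 };

        model_make_refcounted(&info);   /* refcnt: 1 */
        model_get(&info);               /* refcnt: 2 */
        model_put(&info);               /* refcnt: 1 */
        model_put(&info);               /* refcnt: 0 -> teardown */
        return 0;
}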
@@ -1292,7 +1152,7 @@ static void __xen_evtchn_do_upcall(void)
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-       unsigned count;
+       unsigned count;
 
        do {
                unsigned long pending_words;
@@ -1395,10 +1255,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
-       irq_enter();
-#ifdef CONFIG_X86
-       exit_idle();
-#endif
+       exit_idle();
+       irq_enter();
 
        __xen_evtchn_do_upcall();
 
@@ -1812,7 +1670,6 @@ void __init xen_init_IRQ(void)
 
        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                    GFP_KERNEL);
-       BUG_ON(!evtchn_to_irq);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;
 
@@ -1822,9 +1679,6 @@ void __init xen_init_IRQ(void)
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);
 
-       pirq_needs_eoi = pirq_needs_eoi_flag;
-
-#ifdef CONFIG_X86
        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
@@ -1832,21 +1686,8 @@
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
-               int rc;
-               struct physdev_pirq_eoi_gmfn eoi_gmfn;
-
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        pci_xen_initial_domain();
-
-               pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-               eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
-               rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
-               if (rc != 0) {
-                       free_page((unsigned long) pirq_eoi_map);
-                       pirq_eoi_map = NULL;
-               } else
-                       pirq_needs_eoi = pirq_check_eoi_map;
        }
-#endif
 }