Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--	drivers/xen/events.c	113
1 files changed, 73 insertions, 40 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 33167b43ac7..0ae1d4d7e18 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -118,6 +118,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
 static struct irq_chip xen_dynamic_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
+static void enable_dynirq(struct irq_data *data);
+static void disable_dynirq(struct irq_data *data);
 
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
@@ -473,16 +475,6 @@ static void xen_free_irq(unsigned irq)
 	irq_free_desc(irq);
 }
 
-static void pirq_unmask_notify(int irq)
-{
-	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
-
-	if (unlikely(pirq_needs_eoi(irq))) {
-		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
-		WARN_ON(rc);
-	}
-}
-
 static void pirq_query_unmask(int irq)
 {
 	struct physdev_irq_status_query irq_status;
@@ -506,6 +498,29 @@ static bool probing_irq(int irq)
 	return desc && desc->action == NULL;
 }
 
+static void eoi_pirq(struct irq_data *data)
+{
+	int evtchn = evtchn_from_irq(data->irq);
+	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+	int rc = 0;
+
+	irq_move_irq(data);
+
+	if (VALID_EVTCHN(evtchn))
+		clear_evtchn(evtchn);
+
+	if (pirq_needs_eoi(data->irq)) {
+		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+		WARN_ON(rc);
+	}
+}
+
+static void mask_ack_pirq(struct irq_data *data)
+{
+	disable_dynirq(data);
+	eoi_pirq(data);
+}
+
 static unsigned int __startup_pirq(unsigned int irq)
 {
 	struct evtchn_bind_pirq bind_pirq;
@@ -539,7 +554,7 @@ static unsigned int __startup_pirq(unsigned int irq)
 
 out:
 	unmask_evtchn(evtchn);
-	pirq_unmask_notify(irq);
+	eoi_pirq(irq_get_irq_data(irq));
 
 	return 0;
 }
@@ -579,18 +594,7 @@ static void enable_pirq(struct irq_data *data)
 
 static void disable_pirq(struct irq_data *data)
 {
-}
-
-static void ack_pirq(struct irq_data *data)
-{
-	int evtchn = evtchn_from_irq(data->irq);
-
-	irq_move_irq(data);
-
-	if (VALID_EVTCHN(evtchn)) {
-		mask_evtchn(evtchn);
-		clear_evtchn(evtchn);
-	}
+	disable_dynirq(data);
 }
 
 static int find_irq_by_gsi(unsigned gsi)
@@ -639,9 +643,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 	if (irq < 0)
 		goto out;
 
-	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
-				      name);
-
 	irq_op.irq = irq;
 	irq_op.vector = 0;
 
@@ -658,6 +659,32 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
 			       shareable ? PIRQ_SHAREABLE : 0);
 
+	pirq_query_unmask(irq);
+	/* We try to use the handler with the appropriate semantic for the
+	 * type of interrupt: if the interrupt doesn't need an eoi
+	 * (pirq_needs_eoi returns false), we treat it like an edge
+	 * triggered interrupt so we use handle_edge_irq.
+	 * As a matter of fact this only happens when the corresponding
+	 * physical interrupt is edge triggered or an msi.
+	 *
+	 * On the other hand if the interrupt needs an eoi (pirq_needs_eoi
+	 * returns true) we treat it like a level triggered interrupt so we
+	 * use handle_fasteoi_irq like the native code does for this kind of
+	 * interrupts.
+	 * Depending on the Xen version, pirq_needs_eoi might return true
+	 * not only for level triggered interrupts but for edge triggered
+	 * interrupts too. In any case Xen always honors the eoi mechanism,
+	 * not injecting any more pirqs of the same kind if the first one
+	 * hasn't received an eoi yet. Therefore using the fasteoi handler
+	 * is the right choice either way.
+	 */
+	if (pirq_needs_eoi(irq))
+		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+				handle_fasteoi_irq, name);
+	else
+		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+				handle_edge_irq, name);
+
 out:
 	spin_unlock(&irq_mapping_update_lock);
 
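The handler choice above turns on how the two genirq flow handlers drive the chip callbacks: handle_edge_irq() acknowledges through ->irq_ack before the action runs, while handle_fasteoi_irq() signals completion through ->irq_eoi once the action is done. That is why the pirq chip further down wires both .irq_ack and .irq_eoi to eoi_pirq(), and why a combined ->irq_mask_ack (mask_ack_pirq) is supplied for the paths where the core masks and acks in one step. The following is an illustrative sketch only, ordinary userspace C with made-up toy_* names; it mimics the callback ordering, it is not the kernel's genirq code:

	/* Toy model of how the two flow handlers use an irq_chip.
	 * Illustration only, assuming the usual genirq semantics. */
	#include <stdio.h>

	struct toy_chip {
		void (*irq_ack)(int irq);	/* used by the edge flow handler    */
		void (*irq_eoi)(int irq);	/* used by the fasteoi flow handler */
	};

	static void toy_eoi_pirq(int irq)
	{
		/* stands in for eoi_pirq(): clear the event channel and,
		 * when required, issue PHYSDEVOP_eoi to Xen */
		printf("irq %d: clear_evtchn + PHYSDEVOP_eoi if needed\n", irq);
	}

	static struct toy_chip toy_pirq_chip = {
		.irq_ack = toy_eoi_pirq,	/* edge case: ack early   */
		.irq_eoi = toy_eoi_pirq,	/* fasteoi case: eoi late */
	};

	static void toy_handle_edge_irq(struct toy_chip *chip, int irq)
	{
		chip->irq_ack(irq);		/* ack before running the action */
		printf("irq %d: run action\n", irq);
	}

	static void toy_handle_fasteoi_irq(struct toy_chip *chip, int irq)
	{
		printf("irq %d: run action\n", irq);
		chip->irq_eoi(irq);		/* eoi after the action completes */
	}

	int main(void)
	{
		toy_handle_edge_irq(&toy_pirq_chip, 16);	/* edge/MSI pirq */
		toy_handle_fasteoi_irq(&toy_pirq_chip, 17);	/* level pirq    */
		return 0;
	}

Either way the same eoi_pirq() work gets done; only the point in the flow at which it happens differs.
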
@@ -690,8 +717,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	if (irq == -1)
 		goto out;
 
-	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
-				      name);
+	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
+				      name);
 
 	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
 	ret = irq_set_msi_desc(irq, msidesc);
@@ -773,7 +800,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 			goto out;
 
 		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
-					      handle_fasteoi_irq, "event");
+					      handle_edge_irq, "event");
 
 		xen_irq_info_evtchn_init(irq, evtchn);
 	}
@@ -1179,9 +1206,6 @@ static void __xen_evtchn_do_upcall(void)
 			port = (word_idx * BITS_PER_LONG) + bit_idx;
 			irq = evtchn_to_irq[port];
 
-			mask_evtchn(port);
-			clear_evtchn(port);
-
 			if (irq != -1) {
 				desc = irq_to_desc(irq);
 				if (desc)
@@ -1337,10 +1361,16 @@ static void ack_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_masked_irq(data);
+	irq_move_irq(data);
 
 	if (VALID_EVTCHN(evtchn))
-		unmask_evtchn(evtchn);
+		clear_evtchn(evtchn);
+}
+
+static void mask_ack_dynirq(struct irq_data *data)
+{
+	disable_dynirq(data);
+	ack_dynirq(data);
 }
 
 static int retrigger_dynirq(struct irq_data *data)
@@ -1535,7 +1565,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.irq_mask		= disable_dynirq,
 	.irq_unmask		= enable_dynirq,
 
-	.irq_eoi		= ack_dynirq,
+	.irq_ack		= ack_dynirq,
+	.irq_mask_ack		= mask_ack_dynirq,
+
 	.irq_set_affinity	= set_affinity_irq,
 	.irq_retrigger		= retrigger_dynirq,
 };
@@ -1545,14 +1577,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
 
 	.irq_startup		= startup_pirq,
 	.irq_shutdown		= shutdown_pirq,
-
 	.irq_enable		= enable_pirq,
-	.irq_unmask		= enable_pirq,
-
 	.irq_disable		= disable_pirq,
-	.irq_mask		= disable_pirq,
 
-	.irq_ack		= ack_pirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
+
+	.irq_ack		= eoi_pirq,
+	.irq_eoi		= eoi_pirq,
+	.irq_mask_ack		= mask_ack_pirq,
 
 	.irq_set_affinity	= set_affinity_irq,
 
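A similar shift applies to ordinary event channels: the upcall loop no longer masks and clears the pending port itself, so with handle_edge_irq the pending bit is now cleared by the chip's ->irq_ack (ack_dynirq), and masking only happens when the core asks for it via ->irq_mask or ->irq_mask_ack. A rough, illustrative sequence, assuming standard handle_edge_irq() behaviour; the call chain is abbreviated, not verbatim kernel code:

	/* Illustrative only; abbreviated. */
	__xen_evtchn_do_upcall()
	    dispatch the pending port's irq            /* no mask_evtchn/clear_evtchn here any more */
	        handle_edge_irq()
	            ->irq_ack == ack_dynirq()          /* irq_move_irq + clear_evtchn */
	            run the interrupt action
	            /* masking, when needed, goes through ->irq_mask / ->irq_mask_ack == mask_ack_dynirq */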