| author | Ian Campbell <ian.campbell@citrix.com> | 2011-01-11 12:20:15 -0500 |
|---|---|---|
| committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-03-03 11:56:59 -0500 |
| commit | c9df1ce585e3bb5a2f101c1d87381b285a9f962f | |
| tree | 33a0cad7b28df7fe8eab3410b195a29273eb2c68 | |
| parent | cbf6aa89fc52c5253ee141d53eeb73147eb37ac0 | |
xen: events: add xen_allocate_irq_{dynamic, gsi} and xen_free_irq
This is neater than open-coded calls to irq_alloc_desc_at and
irq_free_desc.
No intended behavioural change.
Note that we previously did not check the return value of
irq_alloc_desc_at, which fails for GSI < NR_IRQS_LEGACY because the
core architecture code has already allocated those descriptors for us.
Hence the additional check against NR_IRQS_LEGACY in
xen_allocate_irq_gsi.
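For illustration, a minimal sketch (not part of this patch; the helper
name is made up and the exact error value is an assumption) of why the
legacy short-circuit is needed:

        /* Sketch only: the arch pre-allocates descriptors for the
         * legacy (ISA) IRQs, so irq_alloc_desc_at() on one of them
         * fails (typically with -EEXIST) even though the descriptor
         * is perfectly usable.  Short-circuit those GSIs instead. */
        static int gsi_to_irq_sketch(unsigned gsi)
        {
                if (gsi < NR_IRQS_LEGACY)
                        return gsi;

                return irq_alloc_desc_at(gsi, -1);
        }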
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
| -rw-r--r-- | drivers/xen/events.c | 53 |
1 file changed, 35 insertions, 18 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 1ae775742325..81a53eb6cd1d 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -387,7 +387,7 @@ static int get_nr_hw_irqs(void)
         return ret;
 }
 
-static int find_unbound_irq(void)
+static int xen_allocate_irq_dynamic(void)
 {
         struct irq_data *data;
         int irq, res;
@@ -436,6 +436,30 @@ static bool identity_mapped_irq(unsigned irq)
         return irq < get_nr_hw_irqs();
 }
 
+static int xen_allocate_irq_gsi(unsigned gsi)
+{
+        int irq;
+
+        if (!identity_mapped_irq(gsi) &&
+            (xen_initial_domain() || !xen_pv_domain()))
+                return xen_allocate_irq_dynamic();
+
+        /* Legacy IRQ descriptors are already allocated by the arch. */
+        if (gsi < NR_IRQS_LEGACY)
+                return gsi;
+
+        irq = irq_alloc_desc_at(gsi, -1);
+        if (irq < 0)
+                panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+
+        return irq;
+}
+
+static void xen_free_irq(unsigned irq)
+{
+        irq_free_desc(irq);
+}
+
 static void pirq_unmask_notify(int irq)
 {
         struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
@@ -621,14 +645,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
                 goto out;        /* XXX need refcount? */
         }
 
-        /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
-         * we are using the !xen_initial_domain() to drop in the function.*/
-        if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
-                                xen_pv_domain())) {
-                irq = gsi;
-                irq_alloc_desc_at(irq, -1);
-        } else
-                irq = find_unbound_irq();
+        irq = xen_allocate_irq_gsi(gsi);
 
         set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                       handle_level_irq, name);
@@ -641,7 +658,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
          * this in the priv domain. */
         if (xen_initial_domain() &&
             HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-                irq_free_desc(irq);
+                xen_free_irq(irq);
                 irq = -ENOSPC;
                 goto out;
         }
@@ -682,7 +699,7 @@ void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
         spin_lock(&irq_mapping_update_lock);
 
         if (alloc & XEN_ALLOC_IRQ) {
-                *irq = find_unbound_irq();
+                *irq = xen_allocate_irq_dynamic();
                 if (*irq == -1)
                         goto out;
         }
@@ -732,7 +749,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
 
         spin_lock(&irq_mapping_update_lock);
 
-        irq = find_unbound_irq();
+        irq = xen_allocate_irq_dynamic();
 
         if (irq == -1)
                 goto out;
@@ -741,7 +758,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
         if (rc) {
                 printk(KERN_WARNING "xen map irq failed %d\n", rc);
 
-                irq_free_desc(irq);
+                xen_free_irq(irq);
 
                 irq = -1;
                 goto out;
@@ -783,7 +800,7 @@ int xen_destroy_irq(int irq)
         }
         irq_info[irq] = mk_unbound_info();
 
-        irq_free_desc(irq);
+        xen_free_irq(irq);
 
 out:
         spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +831,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
         irq = evtchn_to_irq[evtchn];
 
         if (irq == -1) {
-                irq = find_unbound_irq();
+                irq = xen_allocate_irq_dynamic();
 
                 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                               handle_fasteoi_irq, "event");
@@ -839,7 +856,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
         irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
         if (irq == -1) {
-                irq = find_unbound_irq();
+                irq = xen_allocate_irq_dynamic();
                 if (irq < 0)
                         goto out;
 
@@ -875,7 +892,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
         irq = per_cpu(virq_to_irq, cpu)[virq];
 
         if (irq == -1) {
-                irq = find_unbound_irq();
+                irq = xen_allocate_irq_dynamic();
 
                 set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                               handle_percpu_irq, "virq");
@@ -934,7 +951,7 @@ static void unbind_from_irq(unsigned int irq)
         if (irq_info[irq].type != IRQT_UNBOUND) {
                 irq_info[irq] = mk_unbound_info();
 
-                irq_free_desc(irq);
+                xen_free_irq(irq);
         }
 
         spin_unlock(&irq_mapping_update_lock);
