aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-10 12:01:01 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-10 12:01:01 -0500
commit9e66645d72d3c395da92b0f8855c787f4b5f0e89 (patch)
tree61b94adb6c32340c45b6d984837556b6b845e983
parentecb50f0afd35a51ef487e8a54b976052eb03d729 (diff)
parent74faaf7aa64c76b60db0f5c994fd43a46be772ce (diff)
Merge branch 'irq-irqdomain-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq domain updates from Thomas Gleixner: "The real interesting irq updates: - Support for hierarchical irq domains: For complex interrupt routing scenarios where more than one interrupt related chip is involved we had no proper representation in the generic interrupt infrastructure so far. That made people implement rather ugly constructs in their nested irq chip implementations. The main offenders are x86 and arm/gic. To distangle that mess we have now hierarchical irqdomains which seperate the various interrupt chips and connect them via the hierarchical domains. That keeps the domain specific details internal to the particular hierarchy level and removes the criss/cross referencing of chip internals. The resulting hierarchy for a complex x86 system will look like this: vector mapped: 74 msi-0 mapped: 2 dmar-ir-1 mapped: 69 ioapic-1 mapped: 4 ioapic-0 mapped: 20 pci-msi-2 mapped: 45 dmar-ir-0 mapped: 3 ioapic-2 mapped: 1 pci-msi-1 mapped: 2 htirq mapped: 0 Neither ioapic nor pci-msi know about the dmar interrupt remapping between themself and the vector domain. If interrupt remapping is disabled ioapic and pci-msi become direct childs of the vector domain. In hindsight we should have done that years ago, but in hindsight we always know better :) - Support for generic MSI interrupt domain handling We have more and more non PCI related MSI interrupts, so providing a generic infrastructure for this is better than having all affected architectures implementing their own private hacks. - Support for PCI-MSI interrupt domain handling, based on the generic MSI support. This part carries the pci/msi branch from Bjorn Helgaas pci tree to avoid a massive conflict. The PCI/MSI parts are acked by Bjorn. I have two more branches on top of this. 
The full conversion of x86 to hierarchical domains and a partial conversion of arm/gic" * 'irq-irqdomain-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits) genirq: Move irq_chip_write_msi_msg() helper to core PCI/MSI: Allow an msi_controller to be associated to an irq domain PCI/MSI: Provide mechanism to alloc/free MSI/MSIX interrupt from irqdomain PCI/MSI: Enhance core to support hierarchy irqdomain PCI/MSI: Move cached entry functions to irq core genirq: Provide default callbacks for msi_domain_ops genirq: Introduce msi_domain_alloc/free_irqs() asm-generic: Add msi.h genirq: Add generic msi irq domain support genirq: Introduce callback irq_chip.irq_write_msi_msg genirq: Work around __irq_set_handler vs stacked domains ordering issues irqdomain: Introduce helper function irq_domain_add_hierarchy() irqdomain: Implement a method to automatically call parent domains alloc/free genirq: Introduce helper irq_domain_set_info() to reduce duplicated code genirq: Split out flow handler typedefs into seperate header file genirq: Add IRQ_SET_MASK_OK_DONE to support stacked irqchip genirq: Introduce irq_chip.irq_compose_msi_msg() to support stacked irqchip genirq: Add more helper functions to support stacked irq_chip genirq: Introduce helper functions to support stacked irq_chip irqdomain: Do irq_find_mapping and set_type for hierarchy irqdomain in case OF ...
-rw-r--r--Documentation/IRQ-domain.txt71
-rw-r--r--arch/arm/include/asm/mach/pci.h10
-rw-r--r--arch/arm/kernel/bios32.c28
-rw-r--r--arch/arm/mach-iop13xx/msi.c10
-rw-r--r--arch/ia64/kernel/msi_ia64.c8
-rw-r--r--arch/ia64/sn/kernel/msi_sn.c8
-rw-r--r--arch/mips/pci/msi-octeon.c2
-rw-r--r--arch/mips/pci/msi-xlp.c12
-rw-r--r--arch/mips/pci/pci-xlr.c2
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c8
-rw-r--r--arch/powerpc/platforms/powernv/pci.c2
-rw-r--r--arch/powerpc/platforms/pseries/msi.c2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c6
-rw-r--r--arch/powerpc/sysdev/mpic_pasemi_msi.c6
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c6
-rw-r--r--arch/powerpc/sysdev/ppc4xx_hsta_msi.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c2
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c2
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c2
-rw-r--r--arch/s390/pci/pci.c10
-rw-r--r--arch/sparc/kernel/pci_msi.c10
-rw-r--r--arch/tile/kernel/pci_gx.c8
-rw-r--r--arch/x86/include/asm/x86_init.h3
-rw-r--r--arch/x86/kernel/apic/io_apic.c8
-rw-r--r--arch/x86/kernel/x86_init.c10
-rw-r--r--arch/x86/pci/xen.c19
-rw-r--r--drivers/iommu/irq_remapping.c8
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c16
-rw-r--r--drivers/of/of_pci.c8
-rw-r--r--drivers/pci/Kconfig6
-rw-r--r--drivers/pci/host/pci-keystone-dw.c6
-rw-r--r--drivers/pci/host/pci-keystone.h2
-rw-r--r--drivers/pci/host/pci-mvebu.c13
-rw-r--r--drivers/pci/host/pci-tegra.c35
-rw-r--r--drivers/pci/host/pcie-designware.c32
-rw-r--r--drivers/pci/host/pcie-designware.h2
-rw-r--r--drivers/pci/host/pcie-rcar.c31
-rw-r--r--drivers/pci/host/pcie-xilinx.c37
-rw-r--r--drivers/pci/msi.c394
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--include/asm-generic/msi.h32
-rw-r--r--include/linux/irq.h33
-rw-r--r--include/linux/irqdomain.h101
-rw-r--r--include/linux/irqhandler.h14
-rw-r--r--include/linux/msi.h187
-rw-r--r--include/linux/of_pci.h12
-rw-r--r--include/linux/pci.h2
-rw-r--r--kernel/irq/Kconfig15
-rw-r--r--kernel/irq/Makefile1
-rw-r--r--kernel/irq/chip.c130
-rw-r--r--kernel/irq/irqdomain.c567
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/msi.c330
53 files changed, 1924 insertions, 351 deletions
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 8a8b82c9ca53..39cfa72732ff 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -151,3 +151,74 @@ used and no descriptor gets allocated it is very important to make sure
151that the driver using the simple domain call irq_create_mapping() 151that the driver using the simple domain call irq_create_mapping()
152before any irq_find_mapping() since the latter will actually work 152before any irq_find_mapping() since the latter will actually work
153for the static IRQ assignment case. 153for the static IRQ assignment case.
154
155==== Hierarchy IRQ domain ====
156On some architectures, there may be multiple interrupt controllers
157involved in delivering an interrupt from the device to the target CPU.
158Let's look at a typical interrupt delivering path on x86 platforms:
159
160Device --> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU
161
162There are three interrupt controllers involved:
1631) IOAPIC controller
1642) Interrupt remapping controller
1653) Local APIC controller
166
167To support such a hardware topology and make software architecture match
168hardware architecture, an irq_domain data structure is built for each
169interrupt controller and those irq_domains are organized into hierarchy.
170When building irq_domain hierarchy, the irq_domain near to the device is
171child and the irq_domain near to CPU is parent. So a hierarchy structure
172as below will be built for the example above.
173 CPU Vector irq_domain (root irq_domain to manage CPU vectors)
174 ^
175 |
176 Interrupt Remapping irq_domain (manage irq_remapping entries)
177 ^
178 |
179 IOAPIC irq_domain (manage IOAPIC delivery entries/pins)
180
181There are four major interfaces to use hierarchy irq_domain:
1821) irq_domain_alloc_irqs(): allocate IRQ descriptors and interrupt
183 controller related resources to deliver these interrupts.
1842) irq_domain_free_irqs(): free IRQ descriptors and interrupt controller
185 related resources associated with these interrupts.
1863) irq_domain_activate_irq(): activate interrupt controller hardware to
187 deliver the interrupt.
1883) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
189 to stop delivering the interrupt.
190
191Following changes are needed to support hierarchy irq_domain.
1921) a new field 'parent' is added to struct irq_domain; it's used to
193 maintain irq_domain hierarchy information.
1942) a new field 'parent_data' is added to struct irq_data; it's used to
195 build hierarchy irq_data to match hierarchy irq_domains. The irq_data
196 is used to store irq_domain pointer and hardware irq number.
1973) new callbacks are added to struct irq_domain_ops to support hierarchy
198 irq_domain operations.
199
200With support of hierarchy irq_domain and hierarchy irq_data ready, an
201irq_domain structure is built for each interrupt controller, and an
202irq_data structure is allocated for each irq_domain associated with an
203IRQ. Now we could go one step further to support stacked(hierarchy)
204irq_chip. That is, an irq_chip is associated with each irq_data along
205the hierarchy. A child irq_chip may implement a required action by
206itself or by cooperating with its parent irq_chip.
207
208With stacked irq_chip, interrupt controller driver only needs to deal
209with the hardware managed by itself and may ask for services from its
210parent irq_chip when needed. So we could achieve a much cleaner
211software architecture.
212
213For an interrupt controller driver to support hierarchy irq_domain, it
214needs to:
2151) Implement irq_domain_ops.alloc and irq_domain_ops.free
2162) Optionally implement irq_domain_ops.activate and
217 irq_domain_ops.deactivate.
2183) Optionally implement an irq_chip to manage the interrupt controller
219 hardware.
2204) No need to implement irq_domain_ops.map and irq_domain_ops.unmap,
221 they are unused with hierarchy irq_domain.
222
223Hierarchy irq_domain may also be used to support other architectures,
224such as ARM, ARM64 etc.
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 7fc42784becb..8292b5f81e23 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -22,6 +22,9 @@ struct hw_pci {
22#ifdef CONFIG_PCI_DOMAINS 22#ifdef CONFIG_PCI_DOMAINS
23 int domain; 23 int domain;
24#endif 24#endif
25#ifdef CONFIG_PCI_MSI
26 struct msi_controller *msi_ctrl;
27#endif
25 struct pci_ops *ops; 28 struct pci_ops *ops;
26 int nr_controllers; 29 int nr_controllers;
27 void **private_data; 30 void **private_data;
@@ -36,8 +39,6 @@ struct hw_pci {
36 resource_size_t start, 39 resource_size_t start,
37 resource_size_t size, 40 resource_size_t size,
38 resource_size_t align); 41 resource_size_t align);
39 void (*add_bus)(struct pci_bus *bus);
40 void (*remove_bus)(struct pci_bus *bus);
41}; 42};
42 43
43/* 44/*
@@ -47,6 +48,9 @@ struct pci_sys_data {
47#ifdef CONFIG_PCI_DOMAINS 48#ifdef CONFIG_PCI_DOMAINS
48 int domain; 49 int domain;
49#endif 50#endif
51#ifdef CONFIG_PCI_MSI
52 struct msi_controller *msi_ctrl;
53#endif
50 struct list_head node; 54 struct list_head node;
51 int busnr; /* primary bus number */ 55 int busnr; /* primary bus number */
52 u64 mem_offset; /* bus->cpu memory mapping offset */ 56 u64 mem_offset; /* bus->cpu memory mapping offset */
@@ -65,8 +69,6 @@ struct pci_sys_data {
65 resource_size_t start, 69 resource_size_t start,
66 resource_size_t size, 70 resource_size_t size,
67 resource_size_t align); 71 resource_size_t align);
68 void (*add_bus)(struct pci_bus *bus);
69 void (*remove_bus)(struct pci_bus *bus);
70 void *private_data; /* platform controller private data */ 72 void *private_data; /* platform controller private data */
71}; 73};
72 74
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 17a26c17f7f5..daaff73bc776 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -18,6 +18,15 @@
18 18
19static int debug_pci; 19static int debug_pci;
20 20
21#ifdef CONFIG_PCI_MSI
22struct msi_controller *pcibios_msi_controller(struct pci_dev *dev)
23{
24 struct pci_sys_data *sysdata = dev->bus->sysdata;
25
26 return sysdata->msi_ctrl;
27}
28#endif
29
21/* 30/*
22 * We can't use pci_get_device() here since we are 31 * We can't use pci_get_device() here since we are
23 * called from interrupt context. 32 * called from interrupt context.
@@ -360,20 +369,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
360} 369}
361EXPORT_SYMBOL(pcibios_fixup_bus); 370EXPORT_SYMBOL(pcibios_fixup_bus);
362 371
363void pcibios_add_bus(struct pci_bus *bus)
364{
365 struct pci_sys_data *sys = bus->sysdata;
366 if (sys->add_bus)
367 sys->add_bus(bus);
368}
369
370void pcibios_remove_bus(struct pci_bus *bus)
371{
372 struct pci_sys_data *sys = bus->sysdata;
373 if (sys->remove_bus)
374 sys->remove_bus(bus);
375}
376
377/* 372/*
378 * Swizzle the device pin each time we cross a bridge. If a platform does 373 * Swizzle the device pin each time we cross a bridge. If a platform does
379 * not provide a swizzle function, we perform the standard PCI swizzling. 374 * not provide a swizzle function, we perform the standard PCI swizzling.
@@ -471,12 +466,13 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
471#ifdef CONFIG_PCI_DOMAINS 466#ifdef CONFIG_PCI_DOMAINS
472 sys->domain = hw->domain; 467 sys->domain = hw->domain;
473#endif 468#endif
469#ifdef CONFIG_PCI_MSI
470 sys->msi_ctrl = hw->msi_ctrl;
471#endif
474 sys->busnr = busnr; 472 sys->busnr = busnr;
475 sys->swizzle = hw->swizzle; 473 sys->swizzle = hw->swizzle;
476 sys->map_irq = hw->map_irq; 474 sys->map_irq = hw->map_irq;
477 sys->align_resource = hw->align_resource; 475 sys->align_resource = hw->align_resource;
478 sys->add_bus = hw->add_bus;
479 sys->remove_bus = hw->remove_bus;
480 INIT_LIST_HEAD(&sys->resources); 476 INIT_LIST_HEAD(&sys->resources);
481 477
482 if (hw->private_data) 478 if (hw->private_data)
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c
index e7730cf9c15d..9f89e76dfbb9 100644
--- a/arch/arm/mach-iop13xx/msi.c
+++ b/arch/arm/mach-iop13xx/msi.c
@@ -126,10 +126,10 @@ static void iop13xx_msi_nop(struct irq_data *d)
126static struct irq_chip iop13xx_msi_chip = { 126static struct irq_chip iop13xx_msi_chip = {
127 .name = "PCI-MSI", 127 .name = "PCI-MSI",
128 .irq_ack = iop13xx_msi_nop, 128 .irq_ack = iop13xx_msi_nop,
129 .irq_enable = unmask_msi_irq, 129 .irq_enable = pci_msi_unmask_irq,
130 .irq_disable = mask_msi_irq, 130 .irq_disable = pci_msi_mask_irq,
131 .irq_mask = mask_msi_irq, 131 .irq_mask = pci_msi_mask_irq,
132 .irq_unmask = unmask_msi_irq, 132 .irq_unmask = pci_msi_unmask_irq,
133}; 133};
134 134
135int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) 135int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
@@ -153,7 +153,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
153 id = iop13xx_cpu_id(); 153 id = iop13xx_cpu_id();
154 msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); 154 msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f);
155 155
156 write_msi_msg(irq, &msg); 156 pci_write_msi_msg(irq, &msg);
157 irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); 157 irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq);
158 158
159 return 0; 159 return 0;
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 8c3730c3c63d..8ae36ea177d3 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -35,7 +35,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
35 data |= MSI_DATA_VECTOR(irq_to_vector(irq)); 35 data |= MSI_DATA_VECTOR(irq_to_vector(irq));
36 msg.data = data; 36 msg.data = data;
37 37
38 write_msi_msg(irq, &msg); 38 pci_write_msi_msg(irq, &msg);
39 cpumask_copy(idata->affinity, cpumask_of(cpu)); 39 cpumask_copy(idata->affinity, cpumask_of(cpu));
40 40
41 return 0; 41 return 0;
@@ -71,7 +71,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
71 MSI_DATA_DELIVERY_FIXED | 71 MSI_DATA_DELIVERY_FIXED |
72 MSI_DATA_VECTOR(vector); 72 MSI_DATA_VECTOR(vector);
73 73
74 write_msi_msg(irq, &msg); 74 pci_write_msi_msg(irq, &msg);
75 irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 75 irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
76 76
77 return 0; 77 return 0;
@@ -102,8 +102,8 @@ static int ia64_msi_retrigger_irq(struct irq_data *data)
102 */ 102 */
103static struct irq_chip ia64_msi_chip = { 103static struct irq_chip ia64_msi_chip = {
104 .name = "PCI-MSI", 104 .name = "PCI-MSI",
105 .irq_mask = mask_msi_irq, 105 .irq_mask = pci_msi_mask_irq,
106 .irq_unmask = unmask_msi_irq, 106 .irq_unmask = pci_msi_unmask_irq,
107 .irq_ack = ia64_ack_msi_irq, 107 .irq_ack = ia64_ack_msi_irq,
108#ifdef CONFIG_SMP 108#ifdef CONFIG_SMP
109 .irq_set_affinity = ia64_set_msi_irq_affinity, 109 .irq_set_affinity = ia64_set_msi_irq_affinity,
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 446e7799928c..a0eb27b66d13 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -145,7 +145,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
145 msg.data = 0x100 + irq; 145 msg.data = 0x100 + irq;
146 146
147 irq_set_msi_desc(irq, entry); 147 irq_set_msi_desc(irq, entry);
148 write_msi_msg(irq, &msg); 148 pci_write_msi_msg(irq, &msg);
149 irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 149 irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
150 150
151 return 0; 151 return 0;
@@ -205,7 +205,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
205 msg.address_hi = (u32)(bus_addr >> 32); 205 msg.address_hi = (u32)(bus_addr >> 32);
206 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); 206 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
207 207
208 write_msi_msg(irq, &msg); 208 pci_write_msi_msg(irq, &msg);
209 cpumask_copy(data->affinity, cpu_mask); 209 cpumask_copy(data->affinity, cpu_mask);
210 210
211 return 0; 211 return 0;
@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(struct irq_data *data)
228 228
229static struct irq_chip sn_msi_chip = { 229static struct irq_chip sn_msi_chip = {
230 .name = "PCI-MSI", 230 .name = "PCI-MSI",
231 .irq_mask = mask_msi_irq, 231 .irq_mask = pci_msi_mask_irq,
232 .irq_unmask = unmask_msi_irq, 232 .irq_unmask = pci_msi_unmask_irq,
233 .irq_ack = sn_ack_msi_irq, 233 .irq_ack = sn_ack_msi_irq,
234#ifdef CONFIG_SMP 234#ifdef CONFIG_SMP
235 .irq_set_affinity = sn_set_msi_irq_affinity, 235 .irq_set_affinity = sn_set_msi_irq_affinity,
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 63bbe07a1ccd..cffaaf4aae3c 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -178,7 +178,7 @@ msi_irq_allocated:
178 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); 178 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
179 179
180 irq_set_msi_desc(irq, desc); 180 irq_set_msi_desc(irq, desc);
181 write_msi_msg(irq, &msg); 181 pci_write_msi_msg(irq, &msg);
182 return 0; 182 return 0;
183} 183}
184 184
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index f7ac3edda1b2..6a40f24c91b4 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -217,7 +217,7 @@ static void xlp_msix_mask_ack(struct irq_data *d)
217 217
218 msixvec = nlm_irq_msixvec(d->irq); 218 msixvec = nlm_irq_msixvec(d->irq);
219 link = nlm_irq_msixlink(msixvec); 219 link = nlm_irq_msixlink(msixvec);
220 mask_msi_irq(d); 220 pci_msi_mask_irq(d);
221 md = irq_data_get_irq_handler_data(d); 221 md = irq_data_get_irq_handler_data(d);
222 222
223 /* Ack MSI on bridge */ 223 /* Ack MSI on bridge */
@@ -239,10 +239,10 @@ static void xlp_msix_mask_ack(struct irq_data *d)
239 239
240static struct irq_chip xlp_msix_chip = { 240static struct irq_chip xlp_msix_chip = {
241 .name = "XLP-MSIX", 241 .name = "XLP-MSIX",
242 .irq_enable = unmask_msi_irq, 242 .irq_enable = pci_msi_unmask_irq,
243 .irq_disable = mask_msi_irq, 243 .irq_disable = pci_msi_mask_irq,
244 .irq_mask_ack = xlp_msix_mask_ack, 244 .irq_mask_ack = xlp_msix_mask_ack,
245 .irq_unmask = unmask_msi_irq, 245 .irq_unmask = pci_msi_unmask_irq,
246}; 246};
247 247
248void arch_teardown_msi_irq(unsigned int irq) 248void arch_teardown_msi_irq(unsigned int irq)
@@ -345,7 +345,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
345 if (ret < 0) 345 if (ret < 0)
346 return ret; 346 return ret;
347 347
348 write_msi_msg(xirq, &msg); 348 pci_write_msi_msg(xirq, &msg);
349 return 0; 349 return 0;
350} 350}
351 351
@@ -446,7 +446,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
446 if (ret < 0) 446 if (ret < 0)
447 return ret; 447 return ret;
448 448
449 write_msi_msg(xirq, &msg); 449 pci_write_msi_msg(xirq, &msg);
450 return 0; 450 return 0;
451} 451}
452 452
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 0dde80332d3a..26d2dabef281 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -260,7 +260,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
260 if (ret < 0) 260 if (ret < 0)
261 return ret; 261 return ret;
262 262
263 write_msi_msg(irq, &msg); 263 pci_write_msi_msg(irq, &msg);
264 return 0; 264 return 0;
265} 265}
266#endif 266#endif
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 862b32702d29..0883994df384 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -279,7 +279,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
279 279
280 irq_set_msi_desc(virq, entry); 280 irq_set_msi_desc(virq, entry);
281 msg.data = virq; 281 msg.data = virq;
282 write_msi_msg(virq, &msg); 282 pci_write_msi_msg(virq, &msg);
283 } 283 }
284 284
285 return 0; 285 return 0;
@@ -301,9 +301,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
301} 301}
302 302
303static struct irq_chip msic_irq_chip = { 303static struct irq_chip msic_irq_chip = {
304 .irq_mask = mask_msi_irq, 304 .irq_mask = pci_msi_mask_irq,
305 .irq_unmask = unmask_msi_irq, 305 .irq_unmask = pci_msi_unmask_irq,
306 .irq_shutdown = mask_msi_irq, 306 .irq_shutdown = pci_msi_mask_irq,
307 .name = "AXON-MSI", 307 .name = "AXON-MSI",
308}; 308};
309 309
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 4b20f2c6b3b2..540fc6dd56b3 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -90,7 +90,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
90 return rc; 90 return rc;
91 } 91 }
92 irq_set_msi_desc(virq, entry); 92 irq_set_msi_desc(virq, entry);
93 write_msi_msg(virq, &msg); 93 pci_write_msi_msg(virq, &msg);
94 } 94 }
95 return 0; 95 return 0;
96} 96}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 8b909e94fd9a..691a154c286d 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -476,7 +476,7 @@ again:
476 irq_set_msi_desc(virq, entry); 476 irq_set_msi_desc(virq, entry);
477 477
478 /* Read config space back so we can restore after reset */ 478 /* Read config space back so we can restore after reset */
479 __read_msi_msg(entry, &msg); 479 __pci_read_msi_msg(entry, &msg);
480 entry->msg = msg; 480 entry->msg = msg;
481 } 481 }
482 482
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index da08ed088157..7aed8d0876b7 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -82,8 +82,8 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
82 82
83 83
84static struct irq_chip fsl_msi_chip = { 84static struct irq_chip fsl_msi_chip = {
85 .irq_mask = mask_msi_irq, 85 .irq_mask = pci_msi_mask_irq,
86 .irq_unmask = unmask_msi_irq, 86 .irq_unmask = pci_msi_unmask_irq,
87 .irq_ack = fsl_msi_end_irq, 87 .irq_ack = fsl_msi_end_irq,
88 .irq_print_chip = fsl_msi_print_chip, 88 .irq_print_chip = fsl_msi_print_chip,
89}; 89};
@@ -242,7 +242,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
242 irq_set_msi_desc(virq, entry); 242 irq_set_msi_desc(virq, entry);
243 243
244 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); 244 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
245 write_msi_msg(virq, &msg); 245 pci_write_msi_msg(virq, &msg);
246 } 246 }
247 return 0; 247 return 0;
248 248
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 15dccd35fa11..45c114bc430b 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -42,7 +42,7 @@ static struct mpic *msi_mpic;
42static void mpic_pasemi_msi_mask_irq(struct irq_data *data) 42static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
43{ 43{
44 pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); 44 pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
45 mask_msi_irq(data); 45 pci_msi_mask_irq(data);
46 mpic_mask_irq(data); 46 mpic_mask_irq(data);
47} 47}
48 48
@@ -50,7 +50,7 @@ static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
50{ 50{
51 pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); 51 pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
52 mpic_unmask_irq(data); 52 mpic_unmask_irq(data);
53 unmask_msi_irq(data); 53 pci_msi_unmask_irq(data);
54} 54}
55 55
56static struct irq_chip mpic_pasemi_msi_chip = { 56static struct irq_chip mpic_pasemi_msi_chip = {
@@ -136,7 +136,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
136 * register to generate MSI [512...1023] 136 * register to generate MSI [512...1023]
137 */ 137 */
138 msg.data = hwirq-0x200; 138 msg.data = hwirq-0x200;
139 write_msi_msg(virq, &msg); 139 pci_write_msi_msg(virq, &msg);
140 } 140 }
141 141
142 return 0; 142 return 0;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 623d7fba15b4..0dff1cd44481 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -25,14 +25,14 @@ static struct mpic *msi_mpic;
25 25
26static void mpic_u3msi_mask_irq(struct irq_data *data) 26static void mpic_u3msi_mask_irq(struct irq_data *data)
27{ 27{
28 mask_msi_irq(data); 28 pci_msi_mask_irq(data);
29 mpic_mask_irq(data); 29 mpic_mask_irq(data);
30} 30}
31 31
32static void mpic_u3msi_unmask_irq(struct irq_data *data) 32static void mpic_u3msi_unmask_irq(struct irq_data *data)
33{ 33{
34 mpic_unmask_irq(data); 34 mpic_unmask_irq(data);
35 unmask_msi_irq(data); 35 pci_msi_unmask_irq(data);
36} 36}
37 37
38static struct irq_chip mpic_u3msi_chip = { 38static struct irq_chip mpic_u3msi_chip = {
@@ -171,7 +171,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
171 printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", 171 printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
172 virq, hwirq, (unsigned long)addr); 172 virq, hwirq, (unsigned long)addr);
173 msg.data = hwirq; 173 msg.data = hwirq;
174 write_msi_msg(virq, &msg); 174 pci_write_msi_msg(virq, &msg);
175 175
176 hwirq++; 176 hwirq++;
177 } 177 }
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index a6a4dbda9078..908105f835d1 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -85,7 +85,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
85 msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); 85 msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1);
86 return -EINVAL; 86 return -EINVAL;
87 } 87 }
88 write_msi_msg(hwirq, &msg); 88 pci_write_msi_msg(hwirq, &msg);
89 } 89 }
90 90
91 return 0; 91 return 0;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 22b5200636e7..518eabbe0bdc 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -116,7 +116,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
116 116
117 irq_set_msi_desc(virq, entry); 117 irq_set_msi_desc(virq, entry);
118 msg.data = int_no; 118 msg.data = int_no;
119 write_msi_msg(virq, &msg); 119 pci_write_msi_msg(virq, &msg);
120 } 120 }
121 return 0; 121 return 0;
122} 122}
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 3c6ee1b64e5d..4ba554ec8eaf 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -73,7 +73,7 @@ static unsigned int ics_opal_startup(struct irq_data *d)
73 * at that level, so we do it here by hand. 73 * at that level, so we do it here by hand.
74 */ 74 */
75 if (d->msi_desc) 75 if (d->msi_desc)
76 unmask_msi_irq(d); 76 pci_msi_unmask_irq(d);
77#endif 77#endif
78 78
79 /* unmask it */ 79 /* unmask it */
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index 936575d99c5c..bc81335b2cbc 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -76,7 +76,7 @@ static unsigned int ics_rtas_startup(struct irq_data *d)
76 * at that level, so we do it here by hand. 76 * at that level, so we do it here by hand.
77 */ 77 */
78 if (d->msi_desc) 78 if (d->msi_desc)
79 unmask_msi_irq(d); 79 pci_msi_unmask_irq(d);
80#endif 80#endif
81 /* unmask it */ 81 /* unmask it */
82 ics_rtas_unmask_irq(d); 82 ics_rtas_unmask_irq(d);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 2fa7b14b9c08..d59c82569750 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock);
50 50
51static struct irq_chip zpci_irq_chip = { 51static struct irq_chip zpci_irq_chip = {
52 .name = "zPCI", 52 .name = "zPCI",
53 .irq_unmask = unmask_msi_irq, 53 .irq_unmask = pci_msi_unmask_irq,
54 .irq_mask = mask_msi_irq, 54 .irq_mask = pci_msi_mask_irq,
55}; 55};
56 56
57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -403,7 +403,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
403 msg.data = hwirq; 403 msg.data = hwirq;
404 msg.address_lo = zdev->msi_addr & 0xffffffff; 404 msg.address_lo = zdev->msi_addr & 0xffffffff;
405 msg.address_hi = zdev->msi_addr >> 32; 405 msg.address_hi = zdev->msi_addr >> 32;
406 write_msi_msg(irq, &msg); 406 pci_write_msi_msg(irq, &msg);
407 airq_iv_set_data(zdev->aibv, hwirq, irq); 407 airq_iv_set_data(zdev->aibv, hwirq, irq);
408 hwirq++; 408 hwirq++;
409 } 409 }
@@ -448,9 +448,9 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
448 /* Release MSI interrupts */ 448 /* Release MSI interrupts */
449 list_for_each_entry(msi, &pdev->msi_list, list) { 449 list_for_each_entry(msi, &pdev->msi_list, list) {
450 if (msi->msi_attrib.is_msix) 450 if (msi->msi_attrib.is_msix)
451 default_msix_mask_irq(msi, 1); 451 __pci_msix_desc_mask_irq(msi, 1);
452 else 452 else
453 default_msi_mask_irq(msi, 1, 1); 453 __pci_msi_desc_mask_irq(msi, 1, 1);
454 irq_set_msi_desc(msi->irq, NULL); 454 irq_set_msi_desc(msi->irq, NULL);
455 irq_free_desc(msi->irq); 455 irq_free_desc(msi->irq);
456 msi->msg.address_lo = 0; 456 msi->msg.address_lo = 0;
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 580651af73f2..84e16d81a6d8 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -111,10 +111,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
111 111
112static struct irq_chip msi_irq = { 112static struct irq_chip msi_irq = {
113 .name = "PCI-MSI", 113 .name = "PCI-MSI",
114 .irq_mask = mask_msi_irq, 114 .irq_mask = pci_msi_mask_irq,
115 .irq_unmask = unmask_msi_irq, 115 .irq_unmask = pci_msi_unmask_irq,
116 .irq_enable = unmask_msi_irq, 116 .irq_enable = pci_msi_unmask_irq,
117 .irq_disable = mask_msi_irq, 117 .irq_disable = pci_msi_mask_irq,
118 /* XXX affinity XXX */ 118 /* XXX affinity XXX */
119}; 119};
120 120
@@ -161,7 +161,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p,
161 msg.data = msi; 161 msg.data = msi;
162 162
163 irq_set_msi_desc(*irq_p, entry); 163 irq_set_msi_desc(*irq_p, entry);
164 write_msi_msg(*irq_p, &msg); 164 pci_write_msi_msg(*irq_p, &msg);
165 165
166 return 0; 166 return 0;
167 167
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index e39f9c542807..e717af20dada 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1453,7 +1453,7 @@ static struct pci_ops tile_cfg_ops = {
1453static unsigned int tilegx_msi_startup(struct irq_data *d) 1453static unsigned int tilegx_msi_startup(struct irq_data *d)
1454{ 1454{
1455 if (d->msi_desc) 1455 if (d->msi_desc)
1456 unmask_msi_irq(d); 1456 pci_msi_unmask_irq(d);
1457 1457
1458 return 0; 1458 return 0;
1459} 1459}
@@ -1465,14 +1465,14 @@ static void tilegx_msi_ack(struct irq_data *d)
1465 1465
1466static void tilegx_msi_mask(struct irq_data *d) 1466static void tilegx_msi_mask(struct irq_data *d)
1467{ 1467{
1468 mask_msi_irq(d); 1468 pci_msi_mask_irq(d);
1469 __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); 1469 __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
1470} 1470}
1471 1471
1472static void tilegx_msi_unmask(struct irq_data *d) 1472static void tilegx_msi_unmask(struct irq_data *d)
1473{ 1473{
1474 __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); 1474 __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
1475 unmask_msi_irq(d); 1475 pci_msi_unmask_irq(d);
1476} 1476}
1477 1477
1478static struct irq_chip tilegx_msi_chip = { 1478static struct irq_chip tilegx_msi_chip = {
@@ -1590,7 +1590,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
1590 msg.address_hi = msi_addr >> 32; 1590 msg.address_hi = msi_addr >> 32;
1591 msg.address_lo = msi_addr & 0xffffffff; 1591 msg.address_lo = msi_addr & 0xffffffff;
1592 1592
1593 write_msi_msg(irq, &msg); 1593 pci_write_msi_msg(irq, &msg);
1594 irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); 1594 irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
1595 irq_set_handler_data(irq, controller); 1595 irq_set_handler_data(irq, controller);
1596 1596
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index e45e4da96bf1..f58a9c7a3c86 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -172,7 +172,6 @@ struct x86_platform_ops {
172 172
173struct pci_dev; 173struct pci_dev;
174struct msi_msg; 174struct msi_msg;
175struct msi_desc;
176 175
177struct x86_msi_ops { 176struct x86_msi_ops {
178 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 177 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
@@ -183,8 +182,6 @@ struct x86_msi_ops {
183 void (*teardown_msi_irqs)(struct pci_dev *dev); 182 void (*teardown_msi_irqs)(struct pci_dev *dev);
184 void (*restore_msi_irqs)(struct pci_dev *dev); 183 void (*restore_msi_irqs)(struct pci_dev *dev);
185 int (*setup_hpet_msi)(unsigned int irq, unsigned int id); 184 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
186 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
187 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
188}; 185};
189 186
190struct IO_APIC_route_entry; 187struct IO_APIC_route_entry;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1183d545da1e..7ffe0a2b870f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3158,7 +3158,7 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3158 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3158 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3159 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3159 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3160 3160
3161 __write_msi_msg(data->msi_desc, &msg); 3161 __pci_write_msi_msg(data->msi_desc, &msg);
3162 3162
3163 return IRQ_SET_MASK_OK_NOCOPY; 3163 return IRQ_SET_MASK_OK_NOCOPY;
3164} 3164}
@@ -3169,8 +3169,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3169 */ 3169 */
3170static struct irq_chip msi_chip = { 3170static struct irq_chip msi_chip = {
3171 .name = "PCI-MSI", 3171 .name = "PCI-MSI",
3172 .irq_unmask = unmask_msi_irq, 3172 .irq_unmask = pci_msi_unmask_irq,
3173 .irq_mask = mask_msi_irq, 3173 .irq_mask = pci_msi_mask_irq,
3174 .irq_ack = ack_apic_edge, 3174 .irq_ack = ack_apic_edge,
3175 .irq_set_affinity = msi_set_affinity, 3175 .irq_set_affinity = msi_set_affinity,
3176 .irq_retrigger = ioapic_retrigger_irq, 3176 .irq_retrigger = ioapic_retrigger_irq,
@@ -3196,7 +3196,7 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
3196 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. 3196 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
3197 */ 3197 */
3198 if (!irq_offset) 3198 if (!irq_offset)
3199 write_msi_msg(irq, &msg); 3199 pci_write_msi_msg(irq, &msg);
3200 3200
3201 setup_remapped_irq(irq, irq_cfg(irq), chip); 3201 setup_remapped_irq(irq, irq_cfg(irq), chip);
3202 3202
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index e48b674639cc..234b0722de53 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -116,8 +116,6 @@ struct x86_msi_ops x86_msi = {
116 .teardown_msi_irqs = default_teardown_msi_irqs, 116 .teardown_msi_irqs = default_teardown_msi_irqs,
117 .restore_msi_irqs = default_restore_msi_irqs, 117 .restore_msi_irqs = default_restore_msi_irqs,
118 .setup_hpet_msi = default_setup_hpet_msi, 118 .setup_hpet_msi = default_setup_hpet_msi,
119 .msi_mask_irq = default_msi_mask_irq,
120 .msix_mask_irq = default_msix_mask_irq,
121}; 119};
122 120
123/* MSI arch specific hooks */ 121/* MSI arch specific hooks */
@@ -140,14 +138,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
140{ 138{
141 x86_msi.restore_msi_irqs(dev); 139 x86_msi.restore_msi_irqs(dev);
142} 140}
143u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
144{
145 return x86_msi.msi_mask_irq(desc, mask, flag);
146}
147u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
148{
149 return x86_msi.msix_mask_irq(desc, flag);
150}
151#endif 141#endif
152 142
153struct x86_io_apic_ops x86_io_apic_ops = { 143struct x86_io_apic_ops x86_io_apic_ops = {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 093f5f4272d3..1819a91bbb9f 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -229,7 +229,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
229 return 1; 229 return 1;
230 230
231 list_for_each_entry(msidesc, &dev->msi_list, list) { 231 list_for_each_entry(msidesc, &dev->msi_list, list) {
232 __read_msi_msg(msidesc, &msg); 232 __pci_read_msi_msg(msidesc, &msg);
233 pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | 233 pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
234 ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); 234 ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
235 if (msg.data != XEN_PIRQ_MSI_DATA || 235 if (msg.data != XEN_PIRQ_MSI_DATA ||
@@ -240,7 +240,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
240 goto error; 240 goto error;
241 } 241 }
242 xen_msi_compose_msg(dev, pirq, &msg); 242 xen_msi_compose_msg(dev, pirq, &msg);
243 __write_msi_msg(msidesc, &msg); 243 __pci_write_msi_msg(msidesc, &msg);
244 dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); 244 dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
245 } else { 245 } else {
246 dev_dbg(&dev->dev, 246 dev_dbg(&dev->dev,
@@ -394,14 +394,7 @@ static void xen_teardown_msi_irq(unsigned int irq)
394{ 394{
395 xen_destroy_irq(irq); 395 xen_destroy_irq(irq);
396} 396}
397static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 397
398{
399 return 0;
400}
401static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag)
402{
403 return 0;
404}
405#endif 398#endif
406 399
407int __init pci_xen_init(void) 400int __init pci_xen_init(void)
@@ -425,8 +418,7 @@ int __init pci_xen_init(void)
425 x86_msi.setup_msi_irqs = xen_setup_msi_irqs; 418 x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
426 x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 419 x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
427 x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; 420 x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
428 x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; 421 pci_msi_ignore_mask = 1;
429 x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
430#endif 422#endif
431 return 0; 423 return 0;
432} 424}
@@ -506,8 +498,7 @@ int __init pci_xen_initial_domain(void)
506 x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; 498 x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
507 x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 499 x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
508 x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; 500 x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
509 x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; 501 pci_msi_ignore_mask = 1;
510 x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
511#endif 502#endif
512 xen_setup_acpi_sci(); 503 xen_setup_acpi_sci();
513 __acpi_register_gsi = acpi_register_gsi_xen; 504 __acpi_register_gsi = acpi_register_gsi_xen;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 74a1767c89b5..2c3f5ad01098 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
56 unsigned int irq; 56 unsigned int irq;
57 struct msi_desc *msidesc; 57 struct msi_desc *msidesc;
58 58
59 WARN_ON(!list_is_singular(&dev->msi_list));
60 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); 59 msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
61 WARN_ON(msidesc->irq);
62 WARN_ON(msidesc->msi_attrib.multiple);
63 WARN_ON(msidesc->nvec_used);
64 60
65 irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); 61 irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
66 if (irq == 0) 62 if (irq == 0)
67 return -ENOSPC; 63 return -ENOSPC;
68 64
69 nvec_pow2 = __roundup_pow_of_two(nvec); 65 nvec_pow2 = __roundup_pow_of_two(nvec);
70 msidesc->nvec_used = nvec;
71 msidesc->msi_attrib.multiple = ilog2(nvec_pow2);
72 for (sub_handle = 0; sub_handle < nvec; sub_handle++) { 66 for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
73 if (!sub_handle) { 67 if (!sub_handle) {
74 index = msi_alloc_remapped_irq(dev, irq, nvec_pow2); 68 index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
@@ -96,8 +90,6 @@ error:
96 * IRQs from tearing down again in default_teardown_msi_irqs() 90 * IRQs from tearing down again in default_teardown_msi_irqs()
97 */ 91 */
98 msidesc->irq = 0; 92 msidesc->irq = 0;
99 msidesc->nvec_used = 0;
100 msidesc->msi_attrib.multiple = 0;
101 93
102 return ret; 94 return ret;
103} 95}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a3fd2b37ddb6..463c235acbdc 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -132,7 +132,7 @@ static void armada_370_xp_free_msi(int hwirq)
132 mutex_unlock(&msi_used_lock); 132 mutex_unlock(&msi_used_lock);
133} 133}
134 134
135static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, 135static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
136 struct pci_dev *pdev, 136 struct pci_dev *pdev,
137 struct msi_desc *desc) 137 struct msi_desc *desc)
138{ 138{
@@ -159,11 +159,11 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
159 msg.address_hi = 0; 159 msg.address_hi = 0;
160 msg.data = 0xf00 | (hwirq + 16); 160 msg.data = 0xf00 | (hwirq + 16);
161 161
162 write_msi_msg(virq, &msg); 162 pci_write_msi_msg(virq, &msg);
163 return 0; 163 return 0;
164} 164}
165 165
166static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, 166static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
167 unsigned int irq) 167 unsigned int irq)
168{ 168{
169 struct irq_data *d = irq_get_irq_data(irq); 169 struct irq_data *d = irq_get_irq_data(irq);
@@ -175,10 +175,10 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
175 175
176static struct irq_chip armada_370_xp_msi_irq_chip = { 176static struct irq_chip armada_370_xp_msi_irq_chip = {
177 .name = "armada_370_xp_msi_irq", 177 .name = "armada_370_xp_msi_irq",
178 .irq_enable = unmask_msi_irq, 178 .irq_enable = pci_msi_unmask_irq,
179 .irq_disable = mask_msi_irq, 179 .irq_disable = pci_msi_mask_irq,
180 .irq_mask = mask_msi_irq, 180 .irq_mask = pci_msi_mask_irq,
181 .irq_unmask = unmask_msi_irq, 181 .irq_unmask = pci_msi_unmask_irq,
182}; 182};
183 183
184static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, 184static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
@@ -198,7 +198,7 @@ static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
198static int armada_370_xp_msi_init(struct device_node *node, 198static int armada_370_xp_msi_init(struct device_node *node,
199 phys_addr_t main_int_phys_base) 199 phys_addr_t main_int_phys_base)
200{ 200{
201 struct msi_chip *msi_chip; 201 struct msi_controller *msi_chip;
202 u32 reg; 202 u32 reg;
203 int ret; 203 int ret;
204 204
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 8882b467be95..88471d3d98cd 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
236static LIST_HEAD(of_pci_msi_chip_list); 236static LIST_HEAD(of_pci_msi_chip_list);
237static DEFINE_MUTEX(of_pci_msi_chip_mutex); 237static DEFINE_MUTEX(of_pci_msi_chip_mutex);
238 238
239int of_pci_msi_chip_add(struct msi_chip *chip) 239int of_pci_msi_chip_add(struct msi_controller *chip)
240{ 240{
241 if (!of_property_read_bool(chip->of_node, "msi-controller")) 241 if (!of_property_read_bool(chip->of_node, "msi-controller"))
242 return -EINVAL; 242 return -EINVAL;
@@ -249,7 +249,7 @@ int of_pci_msi_chip_add(struct msi_chip *chip)
249} 249}
250EXPORT_SYMBOL_GPL(of_pci_msi_chip_add); 250EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);
251 251
252void of_pci_msi_chip_remove(struct msi_chip *chip) 252void of_pci_msi_chip_remove(struct msi_controller *chip)
253{ 253{
254 mutex_lock(&of_pci_msi_chip_mutex); 254 mutex_lock(&of_pci_msi_chip_mutex);
255 list_del(&chip->list); 255 list_del(&chip->list);
@@ -257,9 +257,9 @@ void of_pci_msi_chip_remove(struct msi_chip *chip)
257} 257}
258EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove); 258EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);
259 259
260struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node) 260struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
261{ 261{
262 struct msi_chip *c; 262 struct msi_controller *c;
263 263
264 mutex_lock(&of_pci_msi_chip_mutex); 264 mutex_lock(&of_pci_msi_chip_mutex);
265 list_for_each_entry(c, &of_pci_msi_chip_list, list) { 265 list_for_each_entry(c, &of_pci_msi_chip_list, list) {
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 893503fa1782..cced84233ac0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -4,6 +4,7 @@
4config PCI_MSI 4config PCI_MSI
5 bool "Message Signaled Interrupts (MSI and MSI-X)" 5 bool "Message Signaled Interrupts (MSI and MSI-X)"
6 depends on PCI 6 depends on PCI
7 select GENERIC_MSI_IRQ
7 help 8 help
8 This allows device drivers to enable MSI (Message Signaled 9 This allows device drivers to enable MSI (Message Signaled
9 Interrupts). Message Signaled Interrupts enable a device to 10 Interrupts). Message Signaled Interrupts enable a device to
@@ -16,6 +17,11 @@ config PCI_MSI
16 17
17 If you don't know what to do here, say Y. 18 If you don't know what to do here, say Y.
18 19
20config PCI_MSI_IRQ_DOMAIN
21 bool
22 depends on PCI_MSI
23 select GENERIC_MSI_IRQ_DOMAIN
24
19config PCI_DEBUG 25config PCI_DEBUG
20 bool "PCI Debugging" 26 bool "PCI Debugging"
21 depends on PCI && DEBUG_KERNEL 27 depends on PCI && DEBUG_KERNEL
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 34086ce88e8e..313338db0e43 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -155,7 +155,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
155 /* Mask the end point if PVM implemented */ 155 /* Mask the end point if PVM implemented */
156 if (IS_ENABLED(CONFIG_PCI_MSI)) { 156 if (IS_ENABLED(CONFIG_PCI_MSI)) {
157 if (msi->msi_attrib.maskbit) 157 if (msi->msi_attrib.maskbit)
158 mask_msi_irq(d); 158 pci_msi_mask_irq(d);
159 } 159 }
160 160
161 ks_dw_pcie_msi_clear_irq(pp, offset); 161 ks_dw_pcie_msi_clear_irq(pp, offset);
@@ -177,7 +177,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
177 /* Mask the end point if PVM implemented */ 177 /* Mask the end point if PVM implemented */
178 if (IS_ENABLED(CONFIG_PCI_MSI)) { 178 if (IS_ENABLED(CONFIG_PCI_MSI)) {
179 if (msi->msi_attrib.maskbit) 179 if (msi->msi_attrib.maskbit)
180 unmask_msi_irq(d); 180 pci_msi_unmask_irq(d);
181 } 181 }
182 182
183 ks_dw_pcie_msi_set_irq(pp, offset); 183 ks_dw_pcie_msi_set_irq(pp, offset);
@@ -205,7 +205,7 @@ const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
205 .map = ks_dw_pcie_msi_map, 205 .map = ks_dw_pcie_msi_map,
206}; 206};
207 207
208int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip) 208int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
209{ 209{
210 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 210 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
211 int i; 211 int i;
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index 1fc1fceede9e..478d932b602d 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -55,4 +55,4 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
55void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); 55void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
56void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); 56void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
57int ks_dw_pcie_msi_host_init(struct pcie_port *pp, 57int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
58 struct msi_chip *chip); 58 struct msi_controller *chip);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index b1315e197ffb..9aa810b733a8 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -99,7 +99,7 @@ struct mvebu_pcie_port;
99struct mvebu_pcie { 99struct mvebu_pcie {
100 struct platform_device *pdev; 100 struct platform_device *pdev;
101 struct mvebu_pcie_port *ports; 101 struct mvebu_pcie_port *ports;
102 struct msi_chip *msi; 102 struct msi_controller *msi;
103 struct resource io; 103 struct resource io;
104 char io_name[30]; 104 char io_name[30];
105 struct resource realio; 105 struct resource realio;
@@ -774,12 +774,6 @@ static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
774 return bus; 774 return bus;
775} 775}
776 776
777static void mvebu_pcie_add_bus(struct pci_bus *bus)
778{
779 struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
780 bus->msi = pcie->msi;
781}
782
783static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, 777static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
784 const struct resource *res, 778 const struct resource *res,
785 resource_size_t start, 779 resource_size_t start,
@@ -816,6 +810,10 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
816 810
817 memset(&hw, 0, sizeof(hw)); 811 memset(&hw, 0, sizeof(hw));
818 812
813#ifdef CONFIG_PCI_MSI
814 hw.msi_ctrl = pcie->msi;
815#endif
816
819 hw.nr_controllers = 1; 817 hw.nr_controllers = 1;
820 hw.private_data = (void **)&pcie; 818 hw.private_data = (void **)&pcie;
821 hw.setup = mvebu_pcie_setup; 819 hw.setup = mvebu_pcie_setup;
@@ -823,7 +821,6 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
823 hw.map_irq = of_irq_parse_and_map_pci; 821 hw.map_irq = of_irq_parse_and_map_pci;
824 hw.ops = &mvebu_pcie_ops; 822 hw.ops = &mvebu_pcie_ops;
825 hw.align_resource = mvebu_pcie_align_resource; 823 hw.align_resource = mvebu_pcie_align_resource;
826 hw.add_bus = mvebu_pcie_add_bus;
827 824
828 pci_common_init(&hw); 825 pci_common_init(&hw);
829} 826}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 19bb19c7db4a..feccfa6b6c11 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -238,7 +238,7 @@
238 ) 238 )
239 239
240struct tegra_msi { 240struct tegra_msi {
241 struct msi_chip chip; 241 struct msi_controller chip;
242 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 242 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
243 struct irq_domain *domain; 243 struct irq_domain *domain;
244 unsigned long pages; 244 unsigned long pages;
@@ -259,7 +259,7 @@ struct tegra_pcie_soc_data {
259 bool has_gen2; 259 bool has_gen2;
260}; 260};
261 261
262static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) 262static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
263{ 263{
264 return container_of(chip, struct tegra_msi, chip); 264 return container_of(chip, struct tegra_msi, chip);
265} 265}
@@ -692,15 +692,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
692 return irq; 692 return irq;
693} 693}
694 694
695static void tegra_pcie_add_bus(struct pci_bus *bus)
696{
697 if (IS_ENABLED(CONFIG_PCI_MSI)) {
698 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
699
700 bus->msi = &pcie->msi.chip;
701 }
702}
703
704static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) 695static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
705{ 696{
706 struct tegra_pcie *pcie = sys_to_pcie(sys); 697 struct tegra_pcie *pcie = sys_to_pcie(sys);
@@ -1280,8 +1271,8 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1280 return processed > 0 ? IRQ_HANDLED : IRQ_NONE; 1271 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1281} 1272}
1282 1273
1283static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 1274static int tegra_msi_setup_irq(struct msi_controller *chip,
1284 struct msi_desc *desc) 1275 struct pci_dev *pdev, struct msi_desc *desc)
1285{ 1276{
1286 struct tegra_msi *msi = to_tegra_msi(chip); 1277 struct tegra_msi *msi = to_tegra_msi(chip);
1287 struct msi_msg msg; 1278 struct msi_msg msg;
@@ -1305,12 +1296,13 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1305 msg.address_hi = 0; 1296 msg.address_hi = 0;
1306 msg.data = hwirq; 1297 msg.data = hwirq;
1307 1298
1308 write_msi_msg(irq, &msg); 1299 pci_write_msi_msg(irq, &msg);
1309 1300
1310 return 0; 1301 return 0;
1311} 1302}
1312 1303
1313static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 1304static void tegra_msi_teardown_irq(struct msi_controller *chip,
1305 unsigned int irq)
1314{ 1306{
1315 struct tegra_msi *msi = to_tegra_msi(chip); 1307 struct tegra_msi *msi = to_tegra_msi(chip);
1316 struct irq_data *d = irq_get_irq_data(irq); 1308 struct irq_data *d = irq_get_irq_data(irq);
@@ -1322,10 +1314,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1322 1314
1323static struct irq_chip tegra_msi_irq_chip = { 1315static struct irq_chip tegra_msi_irq_chip = {
1324 .name = "Tegra PCIe MSI", 1316 .name = "Tegra PCIe MSI",
1325 .irq_enable = unmask_msi_irq, 1317 .irq_enable = pci_msi_unmask_irq,
1326 .irq_disable = mask_msi_irq, 1318 .irq_disable = pci_msi_mask_irq,
1327 .irq_mask = mask_msi_irq, 1319 .irq_mask = pci_msi_mask_irq,
1328 .irq_unmask = unmask_msi_irq, 1320 .irq_unmask = pci_msi_unmask_irq,
1329}; 1321};
1330 1322
1331static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, 1323static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
@@ -1893,11 +1885,14 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
1893 1885
1894 memset(&hw, 0, sizeof(hw)); 1886 memset(&hw, 0, sizeof(hw));
1895 1887
1888#ifdef CONFIG_PCI_MSI
1889 hw.msi_ctrl = &pcie->msi.chip;
1890#endif
1891
1896 hw.nr_controllers = 1; 1892 hw.nr_controllers = 1;
1897 hw.private_data = (void **)&pcie; 1893 hw.private_data = (void **)&pcie;
1898 hw.setup = tegra_pcie_setup; 1894 hw.setup = tegra_pcie_setup;
1899 hw.map_irq = tegra_pcie_map_irq; 1895 hw.map_irq = tegra_pcie_map_irq;
1900 hw.add_bus = tegra_pcie_add_bus;
1901 hw.scan = tegra_pcie_scan_bus; 1896 hw.scan = tegra_pcie_scan_bus;
1902 hw.ops = &tegra_pcie_ops; 1897 hw.ops = &tegra_pcie_ops;
1903 1898
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index dfed00aa3ac0..17b51550f9b5 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -152,10 +152,10 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
152 152
153static struct irq_chip dw_msi_irq_chip = { 153static struct irq_chip dw_msi_irq_chip = {
154 .name = "PCI-MSI", 154 .name = "PCI-MSI",
155 .irq_enable = unmask_msi_irq, 155 .irq_enable = pci_msi_unmask_irq,
156 .irq_disable = mask_msi_irq, 156 .irq_disable = pci_msi_mask_irq,
157 .irq_mask = mask_msi_irq, 157 .irq_mask = pci_msi_mask_irq,
158 .irq_unmask = unmask_msi_irq, 158 .irq_unmask = pci_msi_unmask_irq,
159}; 159};
160 160
161/* MSI int handler */ 161/* MSI int handler */
@@ -276,7 +276,7 @@ no_valid_irq:
276 return -ENOSPC; 276 return -ENOSPC;
277} 277}
278 278
279static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 279static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
280 struct msi_desc *desc) 280 struct msi_desc *desc)
281{ 281{
282 int irq, pos; 282 int irq, pos;
@@ -298,12 +298,12 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
298 else 298 else
299 msg.data = pos; 299 msg.data = pos;
300 300
301 write_msi_msg(irq, &msg); 301 pci_write_msi_msg(irq, &msg);
302 302
303 return 0; 303 return 0;
304} 304}
305 305
306static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 306static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
307{ 307{
308 struct irq_data *data = irq_get_irq_data(irq); 308 struct irq_data *data = irq_get_irq_data(irq);
309 struct msi_desc *msi = irq_data_get_msi(data); 309 struct msi_desc *msi = irq_data_get_msi(data);
@@ -312,7 +312,7 @@ static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
312 clear_irq_range(pp, irq, 1, data->hwirq); 312 clear_irq_range(pp, irq, 1, data->hwirq);
313} 313}
314 314
315static struct msi_chip dw_pcie_msi_chip = { 315static struct msi_controller dw_pcie_msi_chip = {
316 .setup_irq = dw_msi_setup_irq, 316 .setup_irq = dw_msi_setup_irq,
317 .teardown_irq = dw_msi_teardown_irq, 317 .teardown_irq = dw_msi_teardown_irq,
318}; 318};
@@ -498,6 +498,11 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
498 val |= PORT_LOGIC_SPEED_CHANGE; 498 val |= PORT_LOGIC_SPEED_CHANGE;
499 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); 499 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
500 500
501#ifdef CONFIG_PCI_MSI
502 dw_pcie_msi_chip.dev = pp->dev;
503 dw_pci.msi_ctrl = &dw_pcie_msi_chip;
504#endif
505
501 dw_pci.nr_controllers = 1; 506 dw_pci.nr_controllers = 1;
502 dw_pci.private_data = (void **)&pp; 507 dw_pci.private_data = (void **)&pp;
503 508
@@ -747,21 +752,10 @@ static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
747 return irq; 752 return irq;
748} 753}
749 754
750static void dw_pcie_add_bus(struct pci_bus *bus)
751{
752 if (IS_ENABLED(CONFIG_PCI_MSI)) {
753 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
754
755 dw_pcie_msi_chip.dev = pp->dev;
756 bus->msi = &dw_pcie_msi_chip;
757 }
758}
759
760static struct hw_pci dw_pci = { 755static struct hw_pci dw_pci = {
761 .setup = dw_pcie_setup, 756 .setup = dw_pcie_setup,
762 .scan = dw_pcie_scan_bus, 757 .scan = dw_pcie_scan_bus,
763 .map_irq = dw_pcie_map_irq, 758 .map_irq = dw_pcie_map_irq,
764 .add_bus = dw_pcie_add_bus,
765}; 759};
766 760
767void dw_pcie_setup_rc(struct pcie_port *pp) 761void dw_pcie_setup_rc(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c6256751daff..d0bbd276840d 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -73,7 +73,7 @@ struct pcie_host_ops {
73 u32 (*get_msi_addr)(struct pcie_port *pp); 73 u32 (*get_msi_addr)(struct pcie_port *pp);
74 u32 (*get_msi_data)(struct pcie_port *pp, int pos); 74 u32 (*get_msi_data)(struct pcie_port *pp, int pos);
75 void (*scan_bus)(struct pcie_port *pp); 75 void (*scan_bus)(struct pcie_port *pp);
76 int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip); 76 int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
77}; 77};
78 78
79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); 79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 61158e03ab5f..d3053e53cf35 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -111,14 +111,14 @@
111struct rcar_msi { 111struct rcar_msi {
112 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 112 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
113 struct irq_domain *domain; 113 struct irq_domain *domain;
114 struct msi_chip chip; 114 struct msi_controller chip;
115 unsigned long pages; 115 unsigned long pages;
116 struct mutex lock; 116 struct mutex lock;
117 int irq1; 117 int irq1;
118 int irq2; 118 int irq2;
119}; 119};
120 120
121static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip) 121static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
122{ 122{
123 return container_of(chip, struct rcar_msi, chip); 123 return container_of(chip, struct rcar_msi, chip);
124} 124}
@@ -380,20 +380,10 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
380 return 1; 380 return 1;
381} 381}
382 382
383static void rcar_pcie_add_bus(struct pci_bus *bus)
384{
385 if (IS_ENABLED(CONFIG_PCI_MSI)) {
386 struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
387
388 bus->msi = &pcie->msi.chip;
389 }
390}
391
392struct hw_pci rcar_pci = { 383struct hw_pci rcar_pci = {
393 .setup = rcar_pcie_setup, 384 .setup = rcar_pcie_setup,
394 .map_irq = of_irq_parse_and_map_pci, 385 .map_irq = of_irq_parse_and_map_pci,
395 .ops = &rcar_pcie_ops, 386 .ops = &rcar_pcie_ops,
396 .add_bus = rcar_pcie_add_bus,
397}; 387};
398 388
399static void rcar_pcie_enable(struct rcar_pcie *pcie) 389static void rcar_pcie_enable(struct rcar_pcie *pcie)
@@ -402,6 +392,9 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie)
402 392
403 rcar_pci.nr_controllers = 1; 393 rcar_pci.nr_controllers = 1;
404 rcar_pci.private_data = (void **)&pcie; 394 rcar_pci.private_data = (void **)&pcie;
395#ifdef CONFIG_PCI_MSI
396 rcar_pci.msi_ctrl = &pcie->msi.chip;
397#endif
405 398
406 pci_common_init_dev(&pdev->dev, &rcar_pci); 399 pci_common_init_dev(&pdev->dev, &rcar_pci);
407#ifdef CONFIG_PCI_DOMAINS 400#ifdef CONFIG_PCI_DOMAINS
@@ -622,7 +615,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
622 return IRQ_HANDLED; 615 return IRQ_HANDLED;
623} 616}
624 617
625static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 618static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
626 struct msi_desc *desc) 619 struct msi_desc *desc)
627{ 620{
628 struct rcar_msi *msi = to_rcar_msi(chip); 621 struct rcar_msi *msi = to_rcar_msi(chip);
@@ -647,12 +640,12 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
647 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); 640 msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
648 msg.data = hwirq; 641 msg.data = hwirq;
649 642
650 write_msi_msg(irq, &msg); 643 pci_write_msi_msg(irq, &msg);
651 644
652 return 0; 645 return 0;
653} 646}
654 647
655static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 648static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
656{ 649{
657 struct rcar_msi *msi = to_rcar_msi(chip); 650 struct rcar_msi *msi = to_rcar_msi(chip);
658 struct irq_data *d = irq_get_irq_data(irq); 651 struct irq_data *d = irq_get_irq_data(irq);
@@ -662,10 +655,10 @@ static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
662 655
663static struct irq_chip rcar_msi_irq_chip = { 656static struct irq_chip rcar_msi_irq_chip = {
664 .name = "R-Car PCIe MSI", 657 .name = "R-Car PCIe MSI",
665 .irq_enable = unmask_msi_irq, 658 .irq_enable = pci_msi_unmask_irq,
666 .irq_disable = mask_msi_irq, 659 .irq_disable = pci_msi_mask_irq,
667 .irq_mask = mask_msi_irq, 660 .irq_mask = pci_msi_mask_irq,
668 .irq_unmask = unmask_msi_irq, 661 .irq_unmask = pci_msi_unmask_irq,
669}; 662};
670 663
671static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, 664static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index ccc496b33a97..2f50fa5953fd 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -335,7 +335,8 @@ static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
335 * @chip: MSI Chip descriptor 335 * @chip: MSI Chip descriptor
336 * @irq: MSI IRQ to destroy 336 * @irq: MSI IRQ to destroy
337 */ 337 */
338static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 338static void xilinx_msi_teardown_irq(struct msi_controller *chip,
339 unsigned int irq)
339{ 340{
340 xilinx_pcie_destroy_msi(irq); 341 xilinx_pcie_destroy_msi(irq);
341} 342}
@@ -348,7 +349,7 @@ static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
348 * 349 *
349 * Return: '0' on success and error value on failure 350 * Return: '0' on success and error value on failure
350 */ 351 */
351static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, 352static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
352 struct pci_dev *pdev, 353 struct pci_dev *pdev,
353 struct msi_desc *desc) 354 struct msi_desc *desc)
354{ 355{
@@ -374,13 +375,13 @@ static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
374 msg.address_lo = msg_addr; 375 msg.address_lo = msg_addr;
375 msg.data = irq; 376 msg.data = irq;
376 377
377 write_msi_msg(irq, &msg); 378 pci_write_msi_msg(irq, &msg);
378 379
379 return 0; 380 return 0;
380} 381}
381 382
382/* MSI Chip Descriptor */ 383/* MSI Chip Descriptor */
383static struct msi_chip xilinx_pcie_msi_chip = { 384static struct msi_controller xilinx_pcie_msi_chip = {
384 .setup_irq = xilinx_pcie_msi_setup_irq, 385 .setup_irq = xilinx_pcie_msi_setup_irq,
385 .teardown_irq = xilinx_msi_teardown_irq, 386 .teardown_irq = xilinx_msi_teardown_irq,
386}; 387};
@@ -388,10 +389,10 @@ static struct msi_chip xilinx_pcie_msi_chip = {
388/* HW Interrupt Chip Descriptor */ 389/* HW Interrupt Chip Descriptor */
389static struct irq_chip xilinx_msi_irq_chip = { 390static struct irq_chip xilinx_msi_irq_chip = {
390 .name = "Xilinx PCIe MSI", 391 .name = "Xilinx PCIe MSI",
391 .irq_enable = unmask_msi_irq, 392 .irq_enable = pci_msi_unmask_irq,
392 .irq_disable = mask_msi_irq, 393 .irq_disable = pci_msi_mask_irq,
393 .irq_mask = mask_msi_irq, 394 .irq_mask = pci_msi_mask_irq,
394 .irq_unmask = unmask_msi_irq, 395 .irq_unmask = pci_msi_unmask_irq,
395}; 396};
396 397
397/** 398/**
@@ -431,20 +432,6 @@ static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
431 pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); 432 pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
432} 433}
433 434
434/**
435 * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
436 * @bus: PCIe bus
437 */
438static void xilinx_pcie_add_bus(struct pci_bus *bus)
439{
440 if (IS_ENABLED(CONFIG_PCI_MSI)) {
441 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
442
443 xilinx_pcie_msi_chip.dev = port->dev;
444 bus->msi = &xilinx_pcie_msi_chip;
445 }
446}
447
448/* INTx Functions */ 435/* INTx Functions */
449 436
450/** 437/**
@@ -924,10 +911,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
924 .private_data = (void **)&port, 911 .private_data = (void **)&port,
925 .setup = xilinx_pcie_setup, 912 .setup = xilinx_pcie_setup,
926 .map_irq = of_irq_parse_and_map_pci, 913 .map_irq = of_irq_parse_and_map_pci,
927 .add_bus = xilinx_pcie_add_bus,
928 .scan = xilinx_pcie_scan_bus, 914 .scan = xilinx_pcie_scan_bus,
929 .ops = &xilinx_pcie_ops, 915 .ops = &xilinx_pcie_ops,
930 }; 916 };
917
918#ifdef CONFIG_PCI_MSI
919 xilinx_pcie_msi_chip.dev = port->dev;
920 hw.msi_ctrl = &xilinx_pcie_msi_chip;
921#endif
931 pci_common_init_dev(dev, &hw); 922 pci_common_init_dev(dev, &hw);
932 923
933 return 0; 924 return 0;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 084587d7cd13..fd60806d3fd0 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -19,19 +19,82 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/irqdomain.h>
22 23
23#include "pci.h" 24#include "pci.h"
24 25
25static int pci_msi_enable = 1; 26static int pci_msi_enable = 1;
27int pci_msi_ignore_mask;
26 28
27#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) 29#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
28 30
31#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
32static struct irq_domain *pci_msi_default_domain;
33static DEFINE_MUTEX(pci_msi_domain_lock);
34
35struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
36{
37 return pci_msi_default_domain;
38}
39
40static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
41{
42 struct irq_domain *domain = NULL;
43
44 if (dev->bus->msi)
45 domain = dev->bus->msi->domain;
46 if (!domain)
47 domain = arch_get_pci_msi_domain(dev);
48
49 return domain;
50}
51
52static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
53{
54 struct irq_domain *domain;
55
56 domain = pci_msi_get_domain(dev);
57 if (domain)
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
59
60 return arch_setup_msi_irqs(dev, nvec, type);
61}
62
63static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
64{
65 struct irq_domain *domain;
66
67 domain = pci_msi_get_domain(dev);
68 if (domain)
69 pci_msi_domain_free_irqs(domain, dev);
70 else
71 arch_teardown_msi_irqs(dev);
72}
73#else
74#define pci_msi_setup_msi_irqs arch_setup_msi_irqs
75#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
76#endif
29 77
30/* Arch hooks */ 78/* Arch hooks */
31 79
80struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
81{
82 return NULL;
83}
84
85static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
86{
87 struct msi_controller *msi_ctrl = dev->bus->msi;
88
89 if (msi_ctrl)
90 return msi_ctrl;
91
92 return pcibios_msi_controller(dev);
93}
94
32int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) 95int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
33{ 96{
34 struct msi_chip *chip = dev->bus->msi; 97 struct msi_controller *chip = pci_msi_controller(dev);
35 int err; 98 int err;
36 99
37 if (!chip || !chip->setup_irq) 100 if (!chip || !chip->setup_irq)
@@ -48,7 +111,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
48 111
49void __weak arch_teardown_msi_irq(unsigned int irq) 112void __weak arch_teardown_msi_irq(unsigned int irq)
50{ 113{
51 struct msi_chip *chip = irq_get_chip_data(irq); 114 struct msi_controller *chip = irq_get_chip_data(irq);
52 115
53 if (!chip || !chip->teardown_irq) 116 if (!chip || !chip->teardown_irq)
54 return; 117 return;
@@ -85,19 +148,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
85 */ 148 */
86void default_teardown_msi_irqs(struct pci_dev *dev) 149void default_teardown_msi_irqs(struct pci_dev *dev)
87{ 150{
151 int i;
88 struct msi_desc *entry; 152 struct msi_desc *entry;
89 153
90 list_for_each_entry(entry, &dev->msi_list, list) { 154 list_for_each_entry(entry, &dev->msi_list, list)
91 int i, nvec; 155 if (entry->irq)
92 if (entry->irq == 0) 156 for (i = 0; i < entry->nvec_used; i++)
93 continue; 157 arch_teardown_msi_irq(entry->irq + i);
94 if (entry->nvec_used)
95 nvec = entry->nvec_used;
96 else
97 nvec = 1 << entry->msi_attrib.multiple;
98 for (i = 0; i < nvec; i++)
99 arch_teardown_msi_irq(entry->irq + i);
100 }
101} 158}
102 159
103void __weak arch_teardown_msi_irqs(struct pci_dev *dev) 160void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -120,7 +177,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
120 } 177 }
121 178
122 if (entry) 179 if (entry)
123 __write_msi_msg(entry, &entry->msg); 180 __pci_write_msi_msg(entry, &entry->msg);
124} 181}
125 182
126void __weak arch_restore_msi_irqs(struct pci_dev *dev) 183void __weak arch_restore_msi_irqs(struct pci_dev *dev)
@@ -163,11 +220,11 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
163 * reliably as devices without an INTx disable bit will then generate a 220 * reliably as devices without an INTx disable bit will then generate a
164 * level IRQ which will never be cleared. 221 * level IRQ which will never be cleared.
165 */ 222 */
166u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 223u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
167{ 224{
168 u32 mask_bits = desc->masked; 225 u32 mask_bits = desc->masked;
169 226
170 if (!desc->msi_attrib.maskbit) 227 if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
171 return 0; 228 return 0;
172 229
173 mask_bits &= ~mask; 230 mask_bits &= ~mask;
@@ -177,14 +234,9 @@ u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
177 return mask_bits; 234 return mask_bits;
178} 235}
179 236
180__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
181{
182 return default_msi_mask_irq(desc, mask, flag);
183}
184
185static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) 237static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
186{ 238{
187 desc->masked = arch_msi_mask_irq(desc, mask, flag); 239 desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
188} 240}
189 241
190/* 242/*
@@ -194,11 +246,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
194 * file. This saves a few milliseconds when initialising devices with lots 246 * file. This saves a few milliseconds when initialising devices with lots
195 * of MSI-X interrupts. 247 * of MSI-X interrupts.
196 */ 248 */
197u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) 249u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
198{ 250{
199 u32 mask_bits = desc->masked; 251 u32 mask_bits = desc->masked;
200 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 252 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
201 PCI_MSIX_ENTRY_VECTOR_CTRL; 253 PCI_MSIX_ENTRY_VECTOR_CTRL;
254
255 if (pci_msi_ignore_mask)
256 return 0;
257
202 mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; 258 mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
203 if (flag) 259 if (flag)
204 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; 260 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
@@ -207,14 +263,9 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag)
207 return mask_bits; 263 return mask_bits;
208} 264}
209 265
210__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
211{
212 return default_msix_mask_irq(desc, flag);
213}
214
215static void msix_mask_irq(struct msi_desc *desc, u32 flag) 266static void msix_mask_irq(struct msi_desc *desc, u32 flag)
216{ 267{
217 desc->masked = arch_msix_mask_irq(desc, flag); 268 desc->masked = __pci_msix_desc_mask_irq(desc, flag);
218} 269}
219 270
220static void msi_set_mask_bit(struct irq_data *data, u32 flag) 271static void msi_set_mask_bit(struct irq_data *data, u32 flag)
@@ -230,12 +281,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
230 } 281 }
231} 282}
232 283
233void mask_msi_irq(struct irq_data *data) 284/**
285 * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
286 * @data: pointer to irqdata associated to that interrupt
287 */
288void pci_msi_mask_irq(struct irq_data *data)
234{ 289{
235 msi_set_mask_bit(data, 1); 290 msi_set_mask_bit(data, 1);
236} 291}
237 292
238void unmask_msi_irq(struct irq_data *data) 293/**
294 * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
295 * @data: pointer to irqdata associated to that interrupt
296 */
297void pci_msi_unmask_irq(struct irq_data *data)
239{ 298{
240 msi_set_mask_bit(data, 0); 299 msi_set_mask_bit(data, 0);
241} 300}
@@ -244,12 +303,11 @@ void default_restore_msi_irqs(struct pci_dev *dev)
244{ 303{
245 struct msi_desc *entry; 304 struct msi_desc *entry;
246 305
247 list_for_each_entry(entry, &dev->msi_list, list) { 306 list_for_each_entry(entry, &dev->msi_list, list)
248 default_restore_msi_irq(dev, entry->irq); 307 default_restore_msi_irq(dev, entry->irq);
249 }
250} 308}
251 309
252void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 310void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
253{ 311{
254 BUG_ON(entry->dev->current_state != PCI_D0); 312 BUG_ON(entry->dev->current_state != PCI_D0);
255 313
@@ -279,32 +337,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
279 } 337 }
280} 338}
281 339
282void read_msi_msg(unsigned int irq, struct msi_msg *msg) 340void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
283{
284 struct msi_desc *entry = irq_get_msi_desc(irq);
285
286 __read_msi_msg(entry, msg);
287}
288
289void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
290{
291 /* Assert that the cache is valid, assuming that
292 * valid messages are not all-zeroes. */
293 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
294 entry->msg.data));
295
296 *msg = entry->msg;
297}
298
299void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
300{
301 struct msi_desc *entry = irq_get_msi_desc(irq);
302
303 __get_cached_msi_msg(entry, msg);
304}
305EXPORT_SYMBOL_GPL(get_cached_msi_msg);
306
307void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
308{ 341{
309 if (entry->dev->current_state != PCI_D0) { 342 if (entry->dev->current_state != PCI_D0) {
310 /* Don't touch the hardware now */ 343 /* Don't touch the hardware now */
@@ -341,34 +374,27 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
341 entry->msg = *msg; 374 entry->msg = *msg;
342} 375}
343 376
344void write_msi_msg(unsigned int irq, struct msi_msg *msg) 377void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
345{ 378{
346 struct msi_desc *entry = irq_get_msi_desc(irq); 379 struct msi_desc *entry = irq_get_msi_desc(irq);
347 380
348 __write_msi_msg(entry, msg); 381 __pci_write_msi_msg(entry, msg);
349} 382}
350EXPORT_SYMBOL_GPL(write_msi_msg); 383EXPORT_SYMBOL_GPL(pci_write_msi_msg);
351 384
352static void free_msi_irqs(struct pci_dev *dev) 385static void free_msi_irqs(struct pci_dev *dev)
353{ 386{
354 struct msi_desc *entry, *tmp; 387 struct msi_desc *entry, *tmp;
355 struct attribute **msi_attrs; 388 struct attribute **msi_attrs;
356 struct device_attribute *dev_attr; 389 struct device_attribute *dev_attr;
357 int count = 0; 390 int i, count = 0;
358 391
359 list_for_each_entry(entry, &dev->msi_list, list) { 392 list_for_each_entry(entry, &dev->msi_list, list)
360 int i, nvec; 393 if (entry->irq)
361 if (!entry->irq) 394 for (i = 0; i < entry->nvec_used; i++)
362 continue; 395 BUG_ON(irq_has_action(entry->irq + i));
363 if (entry->nvec_used)
364 nvec = entry->nvec_used;
365 else
366 nvec = 1 << entry->msi_attrib.multiple;
367 for (i = 0; i < nvec; i++)
368 BUG_ON(irq_has_action(entry->irq + i));
369 }
370 396
371 arch_teardown_msi_irqs(dev); 397 pci_msi_teardown_msi_irqs(dev);
372 398
373 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 399 list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
374 if (entry->msi_attrib.is_msix) { 400 if (entry->msi_attrib.is_msix) {
@@ -451,9 +477,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
451 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); 477 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
452 478
453 arch_restore_msi_irqs(dev); 479 arch_restore_msi_irqs(dev);
454 list_for_each_entry(entry, &dev->msi_list, list) { 480 list_for_each_entry(entry, &dev->msi_list, list)
455 msix_mask_irq(entry, entry->masked); 481 msix_mask_irq(entry, entry->masked);
456 }
457 482
458 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); 483 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
459} 484}
@@ -497,9 +522,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
497 int count = 0; 522 int count = 0;
498 523
499 /* Determine how many msi entries we have */ 524 /* Determine how many msi entries we have */
500 list_for_each_entry(entry, &pdev->msi_list, list) { 525 list_for_each_entry(entry, &pdev->msi_list, list)
501 ++num_msi; 526 ++num_msi;
502 }
503 if (!num_msi) 527 if (!num_msi)
504 return 0; 528 return 0;
505 529
@@ -559,7 +583,7 @@ error_attrs:
559 return ret; 583 return ret;
560} 584}
561 585
562static struct msi_desc *msi_setup_entry(struct pci_dev *dev) 586static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
563{ 587{
564 u16 control; 588 u16 control;
565 struct msi_desc *entry; 589 struct msi_desc *entry;
@@ -577,6 +601,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
577 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); 601 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
578 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 602 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
579 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; 603 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
604 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
605 entry->nvec_used = nvec;
580 606
581 if (control & PCI_MSI_FLAGS_64BIT) 607 if (control & PCI_MSI_FLAGS_64BIT)
582 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; 608 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -623,7 +649,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
623 649
624 msi_set_enable(dev, 0); /* Disable MSI during set up */ 650 msi_set_enable(dev, 0); /* Disable MSI during set up */
625 651
626 entry = msi_setup_entry(dev); 652 entry = msi_setup_entry(dev, nvec);
627 if (!entry) 653 if (!entry)
628 return -ENOMEM; 654 return -ENOMEM;
629 655
@@ -634,7 +660,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
634 list_add_tail(&entry->list, &dev->msi_list); 660 list_add_tail(&entry->list, &dev->msi_list);
635 661
636 /* Configure MSI capability structure */ 662 /* Configure MSI capability structure */
637 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); 663 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
638 if (ret) { 664 if (ret) {
639 msi_mask_irq(entry, mask, ~mask); 665 msi_mask_irq(entry, mask, ~mask);
640 free_msi_irqs(dev); 666 free_msi_irqs(dev);
@@ -701,6 +727,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
701 entry->msi_attrib.entry_nr = entries[i].entry; 727 entry->msi_attrib.entry_nr = entries[i].entry;
702 entry->msi_attrib.default_irq = dev->irq; 728 entry->msi_attrib.default_irq = dev->irq;
703 entry->mask_base = base; 729 entry->mask_base = base;
730 entry->nvec_used = 1;
704 731
705 list_add_tail(&entry->list, &dev->msi_list); 732 list_add_tail(&entry->list, &dev->msi_list);
706 } 733 }
@@ -719,7 +746,6 @@ static void msix_program_entries(struct pci_dev *dev,
719 PCI_MSIX_ENTRY_VECTOR_CTRL; 746 PCI_MSIX_ENTRY_VECTOR_CTRL;
720 747
721 entries[i].vector = entry->irq; 748 entries[i].vector = entry->irq;
722 irq_set_msi_desc(entry->irq, entry);
723 entry->masked = readl(entry->mask_base + offset); 749 entry->masked = readl(entry->mask_base + offset);
724 msix_mask_irq(entry, 1); 750 msix_mask_irq(entry, 1);
725 i++; 751 i++;
@@ -756,7 +782,7 @@ static int msix_capability_init(struct pci_dev *dev,
756 if (ret) 782 if (ret)
757 return ret; 783 return ret;
758 784
759 ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); 785 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
760 if (ret) 786 if (ret)
761 goto out_avail; 787 goto out_avail;
762 788
@@ -895,7 +921,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
895 /* Return the device with MSI unmasked as initial states */ 921 /* Return the device with MSI unmasked as initial states */
896 mask = msi_mask(desc->msi_attrib.multi_cap); 922 mask = msi_mask(desc->msi_attrib.multi_cap);
897 /* Keep cached state to be restored */ 923 /* Keep cached state to be restored */
898 arch_msi_mask_irq(desc, mask, ~mask); 924 __pci_msi_desc_mask_irq(desc, mask, ~mask);
899 925
900 /* Restore dev->irq to its default pin-assertion irq */ 926 /* Restore dev->irq to its default pin-assertion irq */
901 dev->irq = desc->msi_attrib.default_irq; 927 dev->irq = desc->msi_attrib.default_irq;
@@ -993,7 +1019,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
993 /* Return the device with MSI-X masked as initial states */ 1019 /* Return the device with MSI-X masked as initial states */
994 list_for_each_entry(entry, &dev->msi_list, list) { 1020 list_for_each_entry(entry, &dev->msi_list, list) {
995 /* Keep cached states to be restored */ 1021 /* Keep cached states to be restored */
996 arch_msix_mask_irq(entry, 1); 1022 __pci_msix_desc_mask_irq(entry, 1);
997 } 1023 }
998 1024
999 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 1025 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
@@ -1138,3 +1164,197 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1138 return nvec; 1164 return nvec;
1139} 1165}
1140EXPORT_SYMBOL(pci_enable_msix_range); 1166EXPORT_SYMBOL(pci_enable_msix_range);
1167
1168#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
1169/**
1170 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
1171 * @irq_data: Pointer to interrupt data of the MSI interrupt
1172 * @msg: Pointer to the message
1173 */
1174void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
1175{
1176 struct msi_desc *desc = irq_data->msi_desc;
1177
1178 /*
1179 * For MSI-X desc->irq is always equal to irq_data->irq. For
1180 * MSI only the first interrupt of MULTI MSI passes the test.
1181 */
1182 if (desc->irq == irq_data->irq)
1183 __pci_write_msi_msg(desc, msg);
1184}
1185
1186/**
1187 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
1188 * @dev: Pointer to the PCI device
1189 * @desc: Pointer to the msi descriptor
1190 *
1191 * The ID number is only used within the irqdomain.
1192 */
1193irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
1194 struct msi_desc *desc)
1195{
1196 return (irq_hw_number_t)desc->msi_attrib.entry_nr |
1197 PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
1198 (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
1199}
1200
1201static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
1202{
1203 return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
1204}
1205
1206/**
1207 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
1208 * @domain: The interrupt domain to check
1209 * @info: The domain info for verification
1210 * @dev: The device to check
1211 *
1212 * Returns:
1213 * 0 if the functionality is supported
1214 * 1 if Multi MSI is requested, but the domain does not support it
1215 * -ENOTSUPP otherwise
1216 */
1217int pci_msi_domain_check_cap(struct irq_domain *domain,
1218 struct msi_domain_info *info, struct device *dev)
1219{
1220 struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
1221
1222 /* Special handling to support pci_enable_msi_range() */
1223 if (pci_msi_desc_is_multi_msi(desc) &&
1224 !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
1225 return 1;
1226 else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
1227 return -ENOTSUPP;
1228
1229 return 0;
1230}
1231
1232static int pci_msi_domain_handle_error(struct irq_domain *domain,
1233 struct msi_desc *desc, int error)
1234{
1235 /* Special handling to support pci_enable_msi_range() */
1236 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
1237 return 1;
1238
1239 return error;
1240}
1241
1242#ifdef GENERIC_MSI_DOMAIN_OPS
1243static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
1244 struct msi_desc *desc)
1245{
1246 arg->desc = desc;
1247 arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
1248 desc);
1249}
1250#else
1251#define pci_msi_domain_set_desc NULL
1252#endif
1253
1254static struct msi_domain_ops pci_msi_domain_ops_default = {
1255 .set_desc = pci_msi_domain_set_desc,
1256 .msi_check = pci_msi_domain_check_cap,
1257 .handle_error = pci_msi_domain_handle_error,
1258};
1259
1260static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
1261{
1262 struct msi_domain_ops *ops = info->ops;
1263
1264 if (ops == NULL) {
1265 info->ops = &pci_msi_domain_ops_default;
1266 } else {
1267 if (ops->set_desc == NULL)
1268 ops->set_desc = pci_msi_domain_set_desc;
1269 if (ops->msi_check == NULL)
1270 ops->msi_check = pci_msi_domain_check_cap;
1271 if (ops->handle_error == NULL)
1272 ops->handle_error = pci_msi_domain_handle_error;
1273 }
1274}
1275
1276static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
1277{
1278 struct irq_chip *chip = info->chip;
1279
1280 BUG_ON(!chip);
1281 if (!chip->irq_write_msi_msg)
1282 chip->irq_write_msi_msg = pci_msi_domain_write_msg;
1283}
1284
1285/**
1286 * pci_msi_create_irq_domain - Creat a MSI interrupt domain
1287 * @node: Optional device-tree node of the interrupt controller
1288 * @info: MSI domain info
1289 * @parent: Parent irq domain
1290 *
1291 * Updates the domain and chip ops and creates a MSI interrupt domain.
1292 *
1293 * Returns:
1294 * A domain pointer or NULL in case of failure.
1295 */
1296struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
1297 struct msi_domain_info *info,
1298 struct irq_domain *parent)
1299{
1300 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
1301 pci_msi_domain_update_dom_ops(info);
1302 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
1303 pci_msi_domain_update_chip_ops(info);
1304
1305 return msi_create_irq_domain(node, info, parent);
1306}
1307
1308/**
1309 * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
1310 * @domain: The interrupt domain to allocate from
1311 * @dev: The device for which to allocate
1312 * @nvec: The number of interrupts to allocate
1313 * @type: Unused to allow simpler migration from the arch_XXX interfaces
1314 *
1315 * Returns:
1316 * A virtual interrupt number or an error code in case of failure
1317 */
1318int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
1319 int nvec, int type)
1320{
1321 return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
1322}
1323
1324/**
1325 * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
1326 * @domain: The interrupt domain
1327 * @dev: The device for which to free interrupts
1328 */
1329void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
1330{
1331 msi_domain_free_irqs(domain, &dev->dev);
1332}
1333
1334/**
1335 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
1336 * @node: Optional device-tree node of the interrupt controller
1337 * @info: MSI domain info
1338 * @parent: Parent irq domain
1339 *
1340 * Returns: A domain pointer or NULL in case of failure. If successful
1341 * the default PCI/MSI irqdomain pointer is updated.
1342 */
1343struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
1344 struct msi_domain_info *info, struct irq_domain *parent)
1345{
1346 struct irq_domain *domain;
1347
1348 mutex_lock(&pci_msi_domain_lock);
1349 if (pci_msi_default_domain) {
1350 pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
1351 domain = NULL;
1352 } else {
1353 domain = pci_msi_create_irq_domain(node, info, parent);
1354 pci_msi_default_domain = domain;
1355 }
1356 mutex_unlock(&pci_msi_domain_lock);
1357
1358 return domain;
1359}
1360#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 553212f037c3..e8d695b3f54e 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -560,7 +560,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
560 struct msi_msg msg; 560 struct msi_msg msg;
561 561
562 get_cached_msi_msg(irq, &msg); 562 get_cached_msi_msg(irq, &msg);
563 write_msi_msg(irq, &msg); 563 pci_write_msi_msg(irq, &msg);
564 } 564 }
565 565
566 ret = request_irq(irq, vfio_msihandler, 0, 566 ret = request_irq(irq, vfio_msihandler, 0,
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
new file mode 100644
index 000000000000..61c58d8878ce
--- /dev/null
+++ b/include/asm-generic/msi.h
@@ -0,0 +1,32 @@
1#ifndef __ASM_GENERIC_MSI_H
2#define __ASM_GENERIC_MSI_H
3
4#include <linux/types.h>
5
6#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
7# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
8#endif
9
10struct msi_desc;
11
12/**
13 * struct msi_alloc_info - Default structure for MSI interrupt allocation.
14 * @desc: Pointer to msi descriptor
15 * @hwirq: Associated hw interrupt number in the domain
16 * @scratchpad: Storage for implementation specific scratch data
17 *
18 * Architectures can provide their own implementation by not including
19 * asm-generic/msi.h into their arch specific header file.
20 */
21typedef struct msi_alloc_info {
22 struct msi_desc *desc;
23 irq_hw_number_t hwirq;
24 union {
25 unsigned long ul;
26 void *ptr;
27 } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
28} msi_alloc_info_t;
29
30#define GENERIC_MSI_DOMAIN_OPS 1
31
32#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8588e5efe577..d09ec7a1243e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -15,6 +15,7 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/cpumask.h> 16#include <linux/cpumask.h>
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/irqhandler.h>
18#include <linux/irqreturn.h> 19#include <linux/irqreturn.h>
19#include <linux/irqnr.h> 20#include <linux/irqnr.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
@@ -28,11 +29,7 @@
28 29
29struct seq_file; 30struct seq_file;
30struct module; 31struct module;
31struct irq_desc; 32struct msi_msg;
32struct irq_data;
33typedef void (*irq_flow_handler_t)(unsigned int irq,
34 struct irq_desc *desc);
35typedef void (*irq_preflow_handler_t)(struct irq_data *data);
36 33
37/* 34/*
38 * IRQ line status. 35 * IRQ line status.
@@ -114,10 +111,14 @@ enum {
114 * 111 *
115 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity 112 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
116 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity 113 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
114 * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
115 * support stacked irqchips, which indicates skipping
116 * all descendent irqchips.
117 */ 117 */
118enum { 118enum {
119 IRQ_SET_MASK_OK = 0, 119 IRQ_SET_MASK_OK = 0,
120 IRQ_SET_MASK_OK_NOCOPY, 120 IRQ_SET_MASK_OK_NOCOPY,
121 IRQ_SET_MASK_OK_DONE,
121}; 122};
122 123
123struct msi_desc; 124struct msi_desc;
@@ -134,6 +135,8 @@ struct irq_domain;
134 * @chip: low level interrupt hardware access 135 * @chip: low level interrupt hardware access
135 * @domain: Interrupt translation domain; responsible for mapping 136 * @domain: Interrupt translation domain; responsible for mapping
136 * between hwirq number and linux irq number. 137 * between hwirq number and linux irq number.
138 * @parent_data: pointer to parent struct irq_data to support hierarchy
139 * irq_domain
137 * @handler_data: per-IRQ data for the irq_chip methods 140 * @handler_data: per-IRQ data for the irq_chip methods
138 * @chip_data: platform-specific per-chip private data for the chip 141 * @chip_data: platform-specific per-chip private data for the chip
139 * methods, to allow shared chip implementations 142 * methods, to allow shared chip implementations
@@ -152,6 +155,9 @@ struct irq_data {
152 unsigned int state_use_accessors; 155 unsigned int state_use_accessors;
153 struct irq_chip *chip; 156 struct irq_chip *chip;
154 struct irq_domain *domain; 157 struct irq_domain *domain;
158#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
159 struct irq_data *parent_data;
160#endif
155 void *handler_data; 161 void *handler_data;
156 void *chip_data; 162 void *chip_data;
157 struct msi_desc *msi_desc; 163 struct msi_desc *msi_desc;
@@ -316,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
316 * any other callback related to this irq 322 * any other callback related to this irq
317 * @irq_release_resources: optional to release resources acquired with 323 * @irq_release_resources: optional to release resources acquired with
318 * irq_request_resources 324 * irq_request_resources
325 * @irq_compose_msi_msg: optional to compose message content for MSI
326 * @irq_write_msi_msg: optional to write message content for MSI
319 * @flags: chip specific flags 327 * @flags: chip specific flags
320 */ 328 */
321struct irq_chip { 329struct irq_chip {
@@ -352,6 +360,9 @@ struct irq_chip {
352 int (*irq_request_resources)(struct irq_data *data); 360 int (*irq_request_resources)(struct irq_data *data);
353 void (*irq_release_resources)(struct irq_data *data); 361 void (*irq_release_resources)(struct irq_data *data);
354 362
363 void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
364 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
365
355 unsigned long flags; 366 unsigned long flags;
356}; 367};
357 368
@@ -439,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
439extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 450extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
440extern void handle_nested_irq(unsigned int irq); 451extern void handle_nested_irq(unsigned int irq);
441 452
453extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
454#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
455extern void irq_chip_ack_parent(struct irq_data *data);
456extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
457extern void irq_chip_mask_parent(struct irq_data *data);
458extern void irq_chip_unmask_parent(struct irq_data *data);
459extern void irq_chip_eoi_parent(struct irq_data *data);
460extern int irq_chip_set_affinity_parent(struct irq_data *data,
461 const struct cpumask *dest,
462 bool force);
463#endif
464
442/* Handling of unhandled and spurious interrupts: */ 465/* Handling of unhandled and spurious interrupts: */
443extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 466extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
444 irqreturn_t action_ret); 467 irqreturn_t action_ret);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b0f9d16e48f6..676d7306a360 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,11 +33,14 @@
33#define _LINUX_IRQDOMAIN_H 33#define _LINUX_IRQDOMAIN_H
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/irqhandler.h>
36#include <linux/radix-tree.h> 37#include <linux/radix-tree.h>
37 38
38struct device_node; 39struct device_node;
39struct irq_domain; 40struct irq_domain;
40struct of_device_id; 41struct of_device_id;
42struct irq_chip;
43struct irq_data;
41 44
42/* Number of irqs reserved for a legacy isa controller */ 45/* Number of irqs reserved for a legacy isa controller */
43#define NUM_ISA_INTERRUPTS 16 46#define NUM_ISA_INTERRUPTS 16
@@ -64,6 +67,16 @@ struct irq_domain_ops {
64 int (*xlate)(struct irq_domain *d, struct device_node *node, 67 int (*xlate)(struct irq_domain *d, struct device_node *node,
65 const u32 *intspec, unsigned int intsize, 68 const u32 *intspec, unsigned int intsize,
66 unsigned long *out_hwirq, unsigned int *out_type); 69 unsigned long *out_hwirq, unsigned int *out_type);
70
71#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
72 /* extended V2 interfaces to support hierarchy irq_domains */
73 int (*alloc)(struct irq_domain *d, unsigned int virq,
74 unsigned int nr_irqs, void *arg);
75 void (*free)(struct irq_domain *d, unsigned int virq,
76 unsigned int nr_irqs);
77 void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
78 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
79#endif
67}; 80};
68 81
69extern struct irq_domain_ops irq_generic_chip_ops; 82extern struct irq_domain_ops irq_generic_chip_ops;
@@ -77,6 +90,7 @@ struct irq_domain_chip_generic;
77 * @ops: pointer to irq_domain methods 90 * @ops: pointer to irq_domain methods
78 * @host_data: private data pointer for use by owner. Not touched by irq_domain 91 * @host_data: private data pointer for use by owner. Not touched by irq_domain
79 * core code. 92 * core code.
93 * @flags: host per irq_domain flags
80 * 94 *
81 * Optional elements 95 * Optional elements
82 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used 96 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
@@ -84,6 +98,7 @@ struct irq_domain_chip_generic;
84 * @gc: Pointer to a list of generic chips. There is a helper function for 98 * @gc: Pointer to a list of generic chips. There is a helper function for
85 * setting up one or more generic chips for interrupt controllers 99 * setting up one or more generic chips for interrupt controllers
86 * drivers using the generic chip library which uses this pointer. 100 * drivers using the generic chip library which uses this pointer.
101 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
87 * 102 *
88 * Revmap data, used internally by irq_domain 103 * Revmap data, used internally by irq_domain
89 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that 104 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -97,10 +112,14 @@ struct irq_domain {
97 const char *name; 112 const char *name;
98 const struct irq_domain_ops *ops; 113 const struct irq_domain_ops *ops;
99 void *host_data; 114 void *host_data;
115 unsigned int flags;
100 116
101 /* Optional data */ 117 /* Optional data */
102 struct device_node *of_node; 118 struct device_node *of_node;
103 struct irq_domain_chip_generic *gc; 119 struct irq_domain_chip_generic *gc;
120#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
121 struct irq_domain *parent;
122#endif
104 123
105 /* reverse map data. The linear map gets appended to the irq_domain */ 124 /* reverse map data. The linear map gets appended to the irq_domain */
106 irq_hw_number_t hwirq_max; 125 irq_hw_number_t hwirq_max;
@@ -110,6 +129,22 @@ struct irq_domain {
110 unsigned int linear_revmap[]; 129 unsigned int linear_revmap[];
111}; 130};
112 131
132/* Irq domain flags */
133enum {
134 /* Irq domain is hierarchical */
135 IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
136
137 /* Core calls alloc/free recursive through the domain hierarchy. */
138 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
139
140 /*
141 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
142 * for implementation specific purposes and ignored by the
143 * core code.
144 */
145 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
146};
147
113#ifdef CONFIG_IRQ_DOMAIN 148#ifdef CONFIG_IRQ_DOMAIN
114struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 149struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
115 irq_hw_number_t hwirq_max, int direct_max, 150 irq_hw_number_t hwirq_max, int direct_max,
@@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
220 const u32 *intspec, unsigned int intsize, 255 const u32 *intspec, unsigned int intsize,
221 irq_hw_number_t *out_hwirq, unsigned int *out_type); 256 irq_hw_number_t *out_hwirq, unsigned int *out_type);
222 257
258/* V2 interfaces to support hierarchy IRQ domains. */
259extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
260 unsigned int virq);
261#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
262extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
263 unsigned int flags, unsigned int size,
264 struct device_node *node,
265 const struct irq_domain_ops *ops, void *host_data);
266extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
267 unsigned int nr_irqs, int node, void *arg,
268 bool realloc);
269extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
270extern void irq_domain_activate_irq(struct irq_data *irq_data);
271extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
272
273static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
274 unsigned int nr_irqs, int node, void *arg)
275{
276 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
277}
278
279extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
280 unsigned int virq,
281 irq_hw_number_t hwirq,
282 struct irq_chip *chip,
283 void *chip_data);
284extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
285 irq_hw_number_t hwirq, struct irq_chip *chip,
286 void *chip_data, irq_flow_handler_t handler,
287 void *handler_data, const char *handler_name);
288extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
289extern void irq_domain_free_irqs_common(struct irq_domain *domain,
290 unsigned int virq,
291 unsigned int nr_irqs);
292extern void irq_domain_free_irqs_top(struct irq_domain *domain,
293 unsigned int virq, unsigned int nr_irqs);
294
295extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
296 unsigned int irq_base,
297 unsigned int nr_irqs, void *arg);
298
299extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
300 unsigned int irq_base,
301 unsigned int nr_irqs);
302
303static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
304{
305 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
306}
307#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
308static inline void irq_domain_activate_irq(struct irq_data *data) { }
309static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
310static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
311 unsigned int nr_irqs, int node, void *arg)
312{
313 return -1;
314}
315
316static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
317{
318 return false;
319}
320#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
321
223#else /* CONFIG_IRQ_DOMAIN */ 322#else /* CONFIG_IRQ_DOMAIN */
224static inline void irq_dispose_mapping(unsigned int virq) { } 323static inline void irq_dispose_mapping(unsigned int virq) { }
324static inline void irq_domain_activate_irq(struct irq_data *data) { }
325static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
225#endif /* !CONFIG_IRQ_DOMAIN */ 326#endif /* !CONFIG_IRQ_DOMAIN */
226 327
227#endif /* _LINUX_IRQDOMAIN_H */ 328#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
new file mode 100644
index 000000000000..62d543004197
--- /dev/null
+++ b/include/linux/irqhandler.h
@@ -0,0 +1,14 @@
1#ifndef _LINUX_IRQHANDLER_H
2#define _LINUX_IRQHANDLER_H
3
4/*
5 * Interrupt flow handler typedefs are defined here to avoid circular
6 * include dependencies.
7 */
8
9struct irq_desc;
10struct irq_data;
11typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
12typedef void (*irq_preflow_handler_t)(struct irq_data *data);
13
14#endif
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 44f4746d033b..8ac4a68ffae2 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,17 +10,12 @@ struct msi_msg {
10 u32 data; /* 16 bits of msi message data */ 10 u32 data; /* 16 bits of msi message data */
11}; 11};
12 12
13extern int pci_msi_ignore_mask;
13/* Helper functions */ 14/* Helper functions */
14struct irq_data; 15struct irq_data;
15struct msi_desc; 16struct msi_desc;
16void mask_msi_irq(struct irq_data *data);
17void unmask_msi_irq(struct irq_data *data);
18void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 17void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
21void read_msi_msg(unsigned int irq, struct msi_msg *msg);
22void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 18void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
23void write_msi_msg(unsigned int irq, struct msi_msg *msg);
24 19
25struct msi_desc { 20struct msi_desc {
26 struct { 21 struct {
@@ -48,6 +43,52 @@ struct msi_desc {
48 struct msi_msg msg; 43 struct msi_msg msg;
49}; 44};
50 45
46/* Helpers to hide struct msi_desc implementation details */
47#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
48#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
49#define first_msi_entry(dev) \
50 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
51#define for_each_msi_entry(desc, dev) \
52 list_for_each_entry((desc), dev_to_msi_list((dev)), list)
53
54#ifdef CONFIG_PCI_MSI
55#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
56#define for_each_pci_msi_entry(desc, pdev) \
57 for_each_msi_entry((desc), &(pdev)->dev)
58
59static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
60{
61 return desc->dev;
62}
63#endif /* CONFIG_PCI_MSI */
64
65void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
66void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
67void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
68
69u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
70u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
71void pci_msi_mask_irq(struct irq_data *data);
72void pci_msi_unmask_irq(struct irq_data *data);
73
74/* Conversion helpers. Should be removed after merging */
75static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
76{
77 __pci_write_msi_msg(entry, msg);
78}
79static inline void write_msi_msg(int irq, struct msi_msg *msg)
80{
81 pci_write_msi_msg(irq, msg);
82}
83static inline void mask_msi_irq(struct irq_data *data)
84{
85 pci_msi_mask_irq(data);
86}
87static inline void unmask_msi_irq(struct irq_data *data)
88{
89 pci_msi_unmask_irq(data);
90}
91
51/* 92/*
52 * The arch hooks to setup up msi irqs. Those functions are 93 * The arch hooks to setup up msi irqs. Those functions are
53 * implemented as weak symbols so that they /can/ be overriden by 94 * implemented as weak symbols so that they /can/ be overriden by
@@ -61,18 +102,142 @@ void arch_restore_msi_irqs(struct pci_dev *dev);
61 102
62void default_teardown_msi_irqs(struct pci_dev *dev); 103void default_teardown_msi_irqs(struct pci_dev *dev);
63void default_restore_msi_irqs(struct pci_dev *dev); 104void default_restore_msi_irqs(struct pci_dev *dev);
64u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
65u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
66 105
67struct msi_chip { 106struct msi_controller {
68 struct module *owner; 107 struct module *owner;
69 struct device *dev; 108 struct device *dev;
70 struct device_node *of_node; 109 struct device_node *of_node;
71 struct list_head list; 110 struct list_head list;
111#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
112 struct irq_domain *domain;
113#endif
72 114
73 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, 115 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
74 struct msi_desc *desc); 116 struct msi_desc *desc);
75 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); 117 void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
118};
119
120#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
121
122#include <linux/irqhandler.h>
123#include <asm/msi.h>
124
125struct irq_domain;
126struct irq_chip;
127struct device_node;
128struct msi_domain_info;
129
130/**
131 * struct msi_domain_ops - MSI interrupt domain callbacks
132 * @get_hwirq: Retrieve the resulting hw irq number
133 * @msi_init: Domain specific init function for MSI interrupts
134 * @msi_free: Domain specific function to free a MSI interrupts
135 * @msi_check: Callback for verification of the domain/info/dev data
136 * @msi_prepare: Prepare the allocation of the interrupts in the domain
137 * @msi_finish: Optional callbacl to finalize the allocation
138 * @set_desc: Set the msi descriptor for an interrupt
139 * @handle_error: Optional error handler if the allocation fails
140 *
141 * @get_hwirq, @msi_init and @msi_free are callbacks used by
142 * msi_create_irq_domain() and related interfaces
143 *
144 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
145 * are callbacks used by msi_irq_domain_alloc_irqs() and related
146 * interfaces which are based on msi_desc.
147 */
148struct msi_domain_ops {
149 irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
150 msi_alloc_info_t *arg);
151 int (*msi_init)(struct irq_domain *domain,
152 struct msi_domain_info *info,
153 unsigned int virq, irq_hw_number_t hwirq,
154 msi_alloc_info_t *arg);
155 void (*msi_free)(struct irq_domain *domain,
156 struct msi_domain_info *info,
157 unsigned int virq);
158 int (*msi_check)(struct irq_domain *domain,
159 struct msi_domain_info *info,
160 struct device *dev);
161 int (*msi_prepare)(struct irq_domain *domain,
162 struct device *dev, int nvec,
163 msi_alloc_info_t *arg);
164 void (*msi_finish)(msi_alloc_info_t *arg, int retval);
165 void (*set_desc)(msi_alloc_info_t *arg,
166 struct msi_desc *desc);
167 int (*handle_error)(struct irq_domain *domain,
168 struct msi_desc *desc, int error);
169};
170
171/**
172 * struct msi_domain_info - MSI interrupt domain data
173 * @flags: Flags to decribe features and capabilities
174 * @ops: The callback data structure
175 * @chip: Optional: associated interrupt chip
176 * @chip_data: Optional: associated interrupt chip data
177 * @handler: Optional: associated interrupt flow handler
178 * @handler_data: Optional: associated interrupt flow handler data
179 * @handler_name: Optional: associated interrupt flow handler name
180 * @data: Optional: domain specific data
181 */
182struct msi_domain_info {
183 u32 flags;
184 struct msi_domain_ops *ops;
185 struct irq_chip *chip;
186 void *chip_data;
187 irq_flow_handler_t handler;
188 void *handler_data;
189 const char *handler_name;
190 void *data;
191};
192
193/* Flags for msi_domain_info */
194enum {
195 /*
196 * Init non implemented ops callbacks with default MSI domain
197 * callbacks.
198 */
199 MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0),
200 /*
201 * Init non implemented chip callbacks with default MSI chip
202 * callbacks.
203 */
204 MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
205 /* Build identity map between hwirq and irq */
206 MSI_FLAG_IDENTITY_MAP = (1 << 2),
207 /* Support multiple PCI MSI interrupts */
208 MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
209 /* Support PCI MSIX interrupts */
210 MSI_FLAG_PCI_MSIX = (1 << 4),
76}; 211};
77 212
213int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
214 bool force);
215
216struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
217 struct msi_domain_info *info,
218 struct irq_domain *parent);
219int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
220 int nvec);
221void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
222struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
223
224#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
225
226#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
227void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
228struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
229 struct msi_domain_info *info,
230 struct irq_domain *parent);
231int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
232 int nvec, int type);
233void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
234struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
235 struct msi_domain_info *info, struct irq_domain *parent);
236
237irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
238 struct msi_desc *desc);
239int pci_msi_domain_check_cap(struct irq_domain *domain,
240 struct msi_domain_info *info, struct device *dev);
241#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
242
78#endif /* LINUX_MSI_H */ 243#endif /* LINUX_MSI_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 1fd207e7a847..ce0e5abeb454 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -59,13 +59,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
59#endif 59#endif
60 60
61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) 61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
62int of_pci_msi_chip_add(struct msi_chip *chip); 62int of_pci_msi_chip_add(struct msi_controller *chip);
63void of_pci_msi_chip_remove(struct msi_chip *chip); 63void of_pci_msi_chip_remove(struct msi_controller *chip);
64struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); 64struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
65#else 65#else
66static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } 66static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
67static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } 67static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
68static inline struct msi_chip * 68static inline struct msi_controller *
69of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } 69of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
70#endif 70#endif
71 71
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4c8ac5fcc224..a523cee3abb5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -450,7 +450,7 @@ struct pci_bus {
450 struct resource busn_res; /* bus numbers routed to this bus */ 450 struct resource busn_res; /* bus numbers routed to this bus */
451 451
452 struct pci_ops *ops; /* configuration access functions */ 452 struct pci_ops *ops; /* configuration access functions */
453 struct msi_chip *msi; /* MSI controller */ 453 struct msi_controller *msi; /* MSI controller */
454 void *sysdata; /* hook for sys-specific extension */ 454 void *sysdata; /* hook for sys-specific extension */
455 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ 455 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
456 456
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 225086b2652e..9a76e3beda54 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -55,6 +55,21 @@ config GENERIC_IRQ_CHIP
55config IRQ_DOMAIN 55config IRQ_DOMAIN
56 bool 56 bool
57 57
58# Support for hierarchical irq domains
59config IRQ_DOMAIN_HIERARCHY
60 bool
61 select IRQ_DOMAIN
62
63# Generic MSI interrupt support
64config GENERIC_MSI_IRQ
65 bool
66
67# Generic MSI hierarchical interrupt domain support
68config GENERIC_MSI_IRQ_DOMAIN
69 bool
70 select IRQ_DOMAIN_HIERARCHY
71 select GENERIC_MSI_IRQ
72
58config HANDLE_DOMAIN_IRQ 73config HANDLE_DOMAIN_IRQ
59 bool 74 bool
60 75
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index fff17381f0af..d12123526e2b 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
6obj-$(CONFIG_PROC_FS) += proc.o 6obj-$(CONFIG_PROC_FS) += proc.o
7obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 7obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
8obj-$(CONFIG_PM_SLEEP) += pm.o 8obj-$(CONFIG_PM_SLEEP) += pm.o
9obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e5202f00cabc..6f1c7a566b95 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/irqdomain.h>
18 19
19#include <trace/events/irq.h> 20#include <trace/events/irq.h>
20 21
@@ -178,6 +179,7 @@ int irq_startup(struct irq_desc *desc, bool resend)
178 irq_state_clr_disabled(desc); 179 irq_state_clr_disabled(desc);
179 desc->depth = 0; 180 desc->depth = 0;
180 181
182 irq_domain_activate_irq(&desc->irq_data);
181 if (desc->irq_data.chip->irq_startup) { 183 if (desc->irq_data.chip->irq_startup) {
182 ret = desc->irq_data.chip->irq_startup(&desc->irq_data); 184 ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
183 irq_state_clr_masked(desc); 185 irq_state_clr_masked(desc);
@@ -199,6 +201,7 @@ void irq_shutdown(struct irq_desc *desc)
199 desc->irq_data.chip->irq_disable(&desc->irq_data); 201 desc->irq_data.chip->irq_disable(&desc->irq_data);
200 else 202 else
201 desc->irq_data.chip->irq_mask(&desc->irq_data); 203 desc->irq_data.chip->irq_mask(&desc->irq_data);
204 irq_domain_deactivate_irq(&desc->irq_data);
202 irq_state_set_masked(desc); 205 irq_state_set_masked(desc);
203} 206}
204 207
@@ -728,7 +731,30 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
728 if (!handle) { 731 if (!handle) {
729 handle = handle_bad_irq; 732 handle = handle_bad_irq;
730 } else { 733 } else {
731 if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) 734 struct irq_data *irq_data = &desc->irq_data;
735#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
736 /*
737 * With hierarchical domains we might run into a
738 * situation where the outermost chip is not yet set
739 * up, but the inner chips are there. Instead of
740 * bailing we install the handler, but obviously we
741 * cannot enable/startup the interrupt at this point.
742 */
743 while (irq_data) {
744 if (irq_data->chip != &no_irq_chip)
745 break;
746 /*
747 * Bail out if the outer chip is not set up
748 * and the interrrupt supposed to be started
749 * right away.
750 */
751 if (WARN_ON(is_chained))
752 goto out;
753 /* Try the parent */
754 irq_data = irq_data->parent_data;
755 }
756#endif
757 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
732 goto out; 758 goto out;
733 } 759 }
734 760
@@ -847,3 +873,105 @@ void irq_cpu_offline(void)
847 raw_spin_unlock_irqrestore(&desc->lock, flags); 873 raw_spin_unlock_irqrestore(&desc->lock, flags);
848 } 874 }
849} 875}
876
877#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
878/**
879 * irq_chip_ack_parent - Acknowledge the parent interrupt
880 * @data: Pointer to interrupt specific data
881 */
882void irq_chip_ack_parent(struct irq_data *data)
883{
884 data = data->parent_data;
885 data->chip->irq_ack(data);
886}
887
888/**
889 * irq_chip_mask_parent - Mask the parent interrupt
890 * @data: Pointer to interrupt specific data
891 */
892void irq_chip_mask_parent(struct irq_data *data)
893{
894 data = data->parent_data;
895 data->chip->irq_mask(data);
896}
897
898/**
899 * irq_chip_unmask_parent - Unmask the parent interrupt
900 * @data: Pointer to interrupt specific data
901 */
902void irq_chip_unmask_parent(struct irq_data *data)
903{
904 data = data->parent_data;
905 data->chip->irq_unmask(data);
906}
907
908/**
909 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
910 * @data: Pointer to interrupt specific data
911 */
912void irq_chip_eoi_parent(struct irq_data *data)
913{
914 data = data->parent_data;
915 data->chip->irq_eoi(data);
916}
917
918/**
919 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
920 * @data: Pointer to interrupt specific data
921 * @dest: The affinity mask to set
922 * @force: Flag to enforce setting (disable online checks)
923 *
924 * Conditinal, as the underlying parent chip might not implement it.
925 */
926int irq_chip_set_affinity_parent(struct irq_data *data,
927 const struct cpumask *dest, bool force)
928{
929 data = data->parent_data;
930 if (data->chip->irq_set_affinity)
931 return data->chip->irq_set_affinity(data, dest, force);
932
933 return -ENOSYS;
934}
935
936/**
937 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
938 * @data: Pointer to interrupt specific data
939 *
940 * Iterate through the domain hierarchy of the interrupt and check
941 * whether a hw retrigger function exists. If yes, invoke it.
942 */
943int irq_chip_retrigger_hierarchy(struct irq_data *data)
944{
945 for (data = data->parent_data; data; data = data->parent_data)
946 if (data->chip && data->chip->irq_retrigger)
947 return data->chip->irq_retrigger(data);
948
949 return -ENOSYS;
950}
951#endif
952
953/**
954 * irq_chip_compose_msi_msg - Componse msi message for a irq chip
955 * @data: Pointer to interrupt specific data
956 * @msg: Pointer to the MSI message
957 *
958 * For hierarchical domains we find the first chip in the hierarchy
959 * which implements the irq_compose_msi_msg callback. For non
960 * hierarchical we use the top level chip.
961 */
962int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
963{
964 struct irq_data *pos = NULL;
965
966#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
967 for (; data; data = data->parent_data)
968#endif
969 if (data->chip && data->chip->irq_compose_msi_msg)
970 pos = data;
971 if (!pos)
972 return -ENOSYS;
973
974 pos->chip->irq_compose_msi_msg(pos, msg);
975
976 return 0;
977}
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 6534ff6ce02e..7fac311057b8 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,6 +23,10 @@ static DEFINE_MUTEX(irq_domain_mutex);
23static DEFINE_MUTEX(revmap_trees_mutex); 23static DEFINE_MUTEX(revmap_trees_mutex);
24static struct irq_domain *irq_default_domain; 24static struct irq_domain *irq_default_domain;
25 25
26static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
27 irq_hw_number_t hwirq, int node);
28static void irq_domain_check_hierarchy(struct irq_domain *domain);
29
26/** 30/**
27 * __irq_domain_add() - Allocate a new irq_domain data structure 31 * __irq_domain_add() - Allocate a new irq_domain data structure
28 * @of_node: optional device-tree node of the interrupt controller 32 * @of_node: optional device-tree node of the interrupt controller
@@ -30,7 +34,7 @@ static struct irq_domain *irq_default_domain;
30 * @hwirq_max: Maximum number of interrupts supported by controller 34 * @hwirq_max: Maximum number of interrupts supported by controller
31 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no 35 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
32 * direct mapping 36 * direct mapping
33 * @ops: map/unmap domain callbacks 37 * @ops: domain callbacks
34 * @host_data: Controller private data pointer 38 * @host_data: Controller private data pointer
35 * 39 *
36 * Allocates and initialize and irq_domain structure. 40 * Allocates and initialize and irq_domain structure.
@@ -56,6 +60,7 @@ struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
56 domain->hwirq_max = hwirq_max; 60 domain->hwirq_max = hwirq_max;
57 domain->revmap_size = size; 61 domain->revmap_size = size;
58 domain->revmap_direct_max_irq = direct_max; 62 domain->revmap_direct_max_irq = direct_max;
63 irq_domain_check_hierarchy(domain);
59 64
60 mutex_lock(&irq_domain_mutex); 65 mutex_lock(&irq_domain_mutex);
61 list_add(&domain->link, &irq_domain_list); 66 list_add(&domain->link, &irq_domain_list);
@@ -109,7 +114,7 @@ EXPORT_SYMBOL_GPL(irq_domain_remove);
109 * @first_irq: first number of irq block assigned to the domain, 114 * @first_irq: first number of irq block assigned to the domain,
110 * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then 115 * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
111 * pre-map all of the irqs in the domain to virqs starting at first_irq. 116 * pre-map all of the irqs in the domain to virqs starting at first_irq.
112 * @ops: map/unmap domain callbacks 117 * @ops: domain callbacks
113 * @host_data: Controller private data pointer 118 * @host_data: Controller private data pointer
114 * 119 *
115 * Allocates an irq_domain, and optionally if first_irq is positive then also 120 * Allocates an irq_domain, and optionally if first_irq is positive then also
@@ -174,10 +179,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
174 179
175 domain = __irq_domain_add(of_node, first_hwirq + size, 180 domain = __irq_domain_add(of_node, first_hwirq + size,
176 first_hwirq + size, 0, ops, host_data); 181 first_hwirq + size, 0, ops, host_data);
177 if (!domain) 182 if (domain)
178 return NULL; 183 irq_domain_associate_many(domain, first_irq, first_hwirq, size);
179
180 irq_domain_associate_many(domain, first_irq, first_hwirq, size);
181 184
182 return domain; 185 return domain;
183} 186}
@@ -388,7 +391,6 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
388unsigned int irq_create_mapping(struct irq_domain *domain, 391unsigned int irq_create_mapping(struct irq_domain *domain,
389 irq_hw_number_t hwirq) 392 irq_hw_number_t hwirq)
390{ 393{
391 unsigned int hint;
392 int virq; 394 int virq;
393 395
394 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); 396 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
@@ -410,12 +412,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
410 } 412 }
411 413
412 /* Allocate a virtual interrupt number */ 414 /* Allocate a virtual interrupt number */
413 hint = hwirq % nr_irqs; 415 virq = irq_domain_alloc_descs(-1, 1, hwirq,
414 if (hint == 0) 416 of_node_to_nid(domain->of_node));
415 hint++;
416 virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
417 if (virq <= 0)
418 virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
419 if (virq <= 0) { 417 if (virq <= 0) {
420 pr_debug("-> virq allocation failed\n"); 418 pr_debug("-> virq allocation failed\n");
421 return 0; 419 return 0;
@@ -471,7 +469,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
471 struct irq_domain *domain; 469 struct irq_domain *domain;
472 irq_hw_number_t hwirq; 470 irq_hw_number_t hwirq;
473 unsigned int type = IRQ_TYPE_NONE; 471 unsigned int type = IRQ_TYPE_NONE;
474 unsigned int virq; 472 int virq;
475 473
476 domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain; 474 domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
477 if (!domain) { 475 if (!domain) {
@@ -489,10 +487,24 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
489 return 0; 487 return 0;
490 } 488 }
491 489
492 /* Create mapping */ 490 if (irq_domain_is_hierarchy(domain)) {
493 virq = irq_create_mapping(domain, hwirq); 491 /*
494 if (!virq) 492 * If we've already configured this interrupt,
495 return virq; 493 * don't do it again, or hell will break loose.
494 */
495 virq = irq_find_mapping(domain, hwirq);
496 if (virq)
497 return virq;
498
499 virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data);
500 if (virq <= 0)
501 return 0;
502 } else {
503 /* Create mapping */
504 virq = irq_create_mapping(domain, hwirq);
505 if (!virq)
506 return virq;
507 }
496 508
497 /* Set type if specified and different than the current one */ 509 /* Set type if specified and different than the current one */
498 if (type != IRQ_TYPE_NONE && 510 if (type != IRQ_TYPE_NONE &&
@@ -540,8 +552,8 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
540 return 0; 552 return 0;
541 553
542 if (hwirq < domain->revmap_direct_max_irq) { 554 if (hwirq < domain->revmap_direct_max_irq) {
543 data = irq_get_irq_data(hwirq); 555 data = irq_domain_get_irq_data(domain, hwirq);
544 if (data && (data->domain == domain) && (data->hwirq == hwirq)) 556 if (data && data->hwirq == hwirq)
545 return hwirq; 557 return hwirq;
546 } 558 }
547 559
@@ -709,3 +721,518 @@ const struct irq_domain_ops irq_domain_simple_ops = {
709 .xlate = irq_domain_xlate_onetwocell, 721 .xlate = irq_domain_xlate_onetwocell,
710}; 722};
711EXPORT_SYMBOL_GPL(irq_domain_simple_ops); 723EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
724
725static int irq_domain_alloc_descs(int virq, unsigned int cnt,
726 irq_hw_number_t hwirq, int node)
727{
728 unsigned int hint;
729
730 if (virq >= 0) {
731 virq = irq_alloc_descs(virq, virq, cnt, node);
732 } else {
733 hint = hwirq % nr_irqs;
734 if (hint == 0)
735 hint++;
736 virq = irq_alloc_descs_from(hint, cnt, node);
737 if (virq <= 0 && hint > 1)
738 virq = irq_alloc_descs_from(1, cnt, node);
739 }
740
741 return virq;
742}
743
744#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
745/**
746 * irq_domain_add_hierarchy - Add an irqdomain into the hierarchy
747 * @parent: Parent irq domain to associate with the new domain
748 * @flags: Irq domain flags associated to the domain
749 * @size: Size of the domain. See below
750 * @node: Optional device-tree node of the interrupt controller
751 * @ops: Pointer to the interrupt domain callbacks
752 * @host_data: Controller private data pointer
753 *
754 * If @size is 0 a tree domain is created, otherwise a linear domain.
755 *
756 * If successful the parent is associated to the new domain and the
757 * domain flags are set.
758 * Returns pointer to IRQ domain, or NULL on failure.
759 */
760struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
761 unsigned int flags,
762 unsigned int size,
763 struct device_node *node,
764 const struct irq_domain_ops *ops,
765 void *host_data)
766{
767 struct irq_domain *domain;
768
769 if (size)
770 domain = irq_domain_add_linear(node, size, ops, host_data);
771 else
772 domain = irq_domain_add_tree(node, ops, host_data);
773 if (domain) {
774 domain->parent = parent;
775 domain->flags |= flags;
776 }
777
778 return domain;
779}
780
781static void irq_domain_insert_irq(int virq)
782{
783 struct irq_data *data;
784
785 for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
786 struct irq_domain *domain = data->domain;
787 irq_hw_number_t hwirq = data->hwirq;
788
789 if (hwirq < domain->revmap_size) {
790 domain->linear_revmap[hwirq] = virq;
791 } else {
792 mutex_lock(&revmap_trees_mutex);
793 radix_tree_insert(&domain->revmap_tree, hwirq, data);
794 mutex_unlock(&revmap_trees_mutex);
795 }
796
797 /* If not already assigned, give the domain the chip's name */
798 if (!domain->name && data->chip)
799 domain->name = data->chip->name;
800 }
801
802 irq_clear_status_flags(virq, IRQ_NOREQUEST);
803}
804
805static void irq_domain_remove_irq(int virq)
806{
807 struct irq_data *data;
808
809 irq_set_status_flags(virq, IRQ_NOREQUEST);
810 irq_set_chip_and_handler(virq, NULL, NULL);
811 synchronize_irq(virq);
812 smp_mb();
813
814 for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
815 struct irq_domain *domain = data->domain;
816 irq_hw_number_t hwirq = data->hwirq;
817
818 if (hwirq < domain->revmap_size) {
819 domain->linear_revmap[hwirq] = 0;
820 } else {
821 mutex_lock(&revmap_trees_mutex);
822 radix_tree_delete(&domain->revmap_tree, hwirq);
823 mutex_unlock(&revmap_trees_mutex);
824 }
825 }
826}
827
828static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
829 struct irq_data *child)
830{
831 struct irq_data *irq_data;
832
833 irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node);
834 if (irq_data) {
835 child->parent_data = irq_data;
836 irq_data->irq = child->irq;
837 irq_data->node = child->node;
838 irq_data->domain = domain;
839 }
840
841 return irq_data;
842}
843
844static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
845{
846 struct irq_data *irq_data, *tmp;
847 int i;
848
849 for (i = 0; i < nr_irqs; i++) {
850 irq_data = irq_get_irq_data(virq + i);
851 tmp = irq_data->parent_data;
852 irq_data->parent_data = NULL;
853 irq_data->domain = NULL;
854
855 while (tmp) {
856 irq_data = tmp;
857 tmp = tmp->parent_data;
858 kfree(irq_data);
859 }
860 }
861}
862
863static int irq_domain_alloc_irq_data(struct irq_domain *domain,
864 unsigned int virq, unsigned int nr_irqs)
865{
866 struct irq_data *irq_data;
867 struct irq_domain *parent;
868 int i;
869
870 /* The outermost irq_data is embedded in struct irq_desc */
871 for (i = 0; i < nr_irqs; i++) {
872 irq_data = irq_get_irq_data(virq + i);
873 irq_data->domain = domain;
874
875 for (parent = domain->parent; parent; parent = parent->parent) {
876 irq_data = irq_domain_insert_irq_data(parent, irq_data);
877 if (!irq_data) {
878 irq_domain_free_irq_data(virq, i + 1);
879 return -ENOMEM;
880 }
881 }
882 }
883
884 return 0;
885}
886
887/**
888 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
889 * @domain: domain to match
890 * @virq: IRQ number to get irq_data
891 */
892struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
893 unsigned int virq)
894{
895 struct irq_data *irq_data;
896
897 for (irq_data = irq_get_irq_data(virq); irq_data;
898 irq_data = irq_data->parent_data)
899 if (irq_data->domain == domain)
900 return irq_data;
901
902 return NULL;
903}
904
905/**
906 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
907 * @domain: Interrupt domain to match
908 * @virq: IRQ number
909 * @hwirq: The hwirq number
910 * @chip: The associated interrupt chip
911 * @chip_data: The associated chip data
912 */
913int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
914 irq_hw_number_t hwirq, struct irq_chip *chip,
915 void *chip_data)
916{
917 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
918
919 if (!irq_data)
920 return -ENOENT;
921
922 irq_data->hwirq = hwirq;
923 irq_data->chip = chip ? chip : &no_irq_chip;
924 irq_data->chip_data = chip_data;
925
926 return 0;
927}
928
929/**
930 * irq_domain_set_info - Set the complete data for a @virq in @domain
931 * @domain: Interrupt domain to match
932 * @virq: IRQ number
933 * @hwirq: The hardware interrupt number
934 * @chip: The associated interrupt chip
935 * @chip_data: The associated interrupt chip data
936 * @handler: The interrupt flow handler
937 * @handler_data: The interrupt flow handler data
938 * @handler_name: The interrupt handler name
939 */
940void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
941 irq_hw_number_t hwirq, struct irq_chip *chip,
942 void *chip_data, irq_flow_handler_t handler,
943 void *handler_data, const char *handler_name)
944{
945 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
946 __irq_set_handler(virq, handler, 0, handler_name);
947 irq_set_handler_data(virq, handler_data);
948}
949
950/**
951 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
952 * @irq_data: The pointer to irq_data
953 */
954void irq_domain_reset_irq_data(struct irq_data *irq_data)
955{
956 irq_data->hwirq = 0;
957 irq_data->chip = &no_irq_chip;
958 irq_data->chip_data = NULL;
959}
960
961/**
962 * irq_domain_free_irqs_common - Clear irq_data and free the parent
963 * @domain: Interrupt domain to match
964 * @virq: IRQ number to start with
965 * @nr_irqs: The number of irqs to free
966 */
967void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
968 unsigned int nr_irqs)
969{
970 struct irq_data *irq_data;
971 int i;
972
973 for (i = 0; i < nr_irqs; i++) {
974 irq_data = irq_domain_get_irq_data(domain, virq + i);
975 if (irq_data)
976 irq_domain_reset_irq_data(irq_data);
977 }
978 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
979}
980
981/**
982 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
983 * @domain: Interrupt domain to match
984 * @virq: IRQ number to start with
985 * @nr_irqs: The number of irqs to free
986 */
987void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
988 unsigned int nr_irqs)
989{
990 int i;
991
992 for (i = 0; i < nr_irqs; i++) {
993 irq_set_handler_data(virq + i, NULL);
994 irq_set_handler(virq + i, NULL);
995 }
996 irq_domain_free_irqs_common(domain, virq, nr_irqs);
997}
998
999static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
1000{
1001 return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
1002}
1003
1004static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
1005 unsigned int irq_base,
1006 unsigned int nr_irqs)
1007{
1008 domain->ops->free(domain, irq_base, nr_irqs);
1009 if (irq_domain_is_auto_recursive(domain)) {
1010 BUG_ON(!domain->parent);
1011 irq_domain_free_irqs_recursive(domain->parent, irq_base,
1012 nr_irqs);
1013 }
1014}
1015
1016static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
1017 unsigned int irq_base,
1018 unsigned int nr_irqs, void *arg)
1019{
1020 int ret = 0;
1021 struct irq_domain *parent = domain->parent;
1022 bool recursive = irq_domain_is_auto_recursive(domain);
1023
1024 BUG_ON(recursive && !parent);
1025 if (recursive)
1026 ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
1027 nr_irqs, arg);
1028 if (ret >= 0)
1029 ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
1030 if (ret < 0 && recursive)
1031 irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
1032
1033 return ret;
1034}
1035
1036/**
1037 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1038 * @domain: domain to allocate from
1039 * @irq_base: allocate specified IRQ number if irq_base >= 0
1040 * @nr_irqs: number of IRQs to allocate
1041 * @node: NUMA node id for memory allocation
1042 * @arg: domain specific argument
1043 * @realloc: IRQ descriptors have already been allocated if true
1044 *
1045 * Allocate IRQ numbers and initialize all data structures to support
1046 * hierarchy IRQ domains.
1047 * Parameter @realloc is mainly to support legacy IRQs.
1048 * Returns error code or allocated IRQ number
1049 *
1050 * The whole process to setup an IRQ has been split into two steps.
1051 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
1052 * descriptor and required hardware resources. The second step,
1053 * irq_domain_activate_irq(), is to program hardware with preallocated
1054 * resources. In this way, it's easier to rollback when failing to
1055 * allocate resources.
1056 */
1057int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1058 unsigned int nr_irqs, int node, void *arg,
1059 bool realloc)
1060{
1061 int i, ret, virq;
1062
1063 if (domain == NULL) {
1064 domain = irq_default_domain;
1065 if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1066 return -EINVAL;
1067 }
1068
1069 if (!domain->ops->alloc) {
1070 pr_debug("domain->ops->alloc() is NULL\n");
1071 return -ENOSYS;
1072 }
1073
1074 if (realloc && irq_base >= 0) {
1075 virq = irq_base;
1076 } else {
1077 virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
1078 if (virq < 0) {
1079 pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1080 irq_base, nr_irqs);
1081 return virq;
1082 }
1083 }
1084
1085 if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1086 pr_debug("cannot allocate memory for IRQ%d\n", virq);
1087 ret = -ENOMEM;
1088 goto out_free_desc;
1089 }
1090
1091 mutex_lock(&irq_domain_mutex);
1092 ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
1093 if (ret < 0) {
1094 mutex_unlock(&irq_domain_mutex);
1095 goto out_free_irq_data;
1096 }
1097 for (i = 0; i < nr_irqs; i++)
1098 irq_domain_insert_irq(virq + i);
1099 mutex_unlock(&irq_domain_mutex);
1100
1101 return virq;
1102
1103out_free_irq_data:
1104 irq_domain_free_irq_data(virq, nr_irqs);
1105out_free_desc:
1106 irq_free_descs(virq, nr_irqs);
1107 return ret;
1108}
1109
1110/**
1111 * irq_domain_free_irqs - Free IRQ number and associated data structures
1112 * @virq: base IRQ number
1113 * @nr_irqs: number of IRQs to free
1114 */
1115void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1116{
1117 struct irq_data *data = irq_get_irq_data(virq);
1118 int i;
1119
1120 if (WARN(!data || !data->domain || !data->domain->ops->free,
1121 "NULL pointer, cannot free irq\n"))
1122 return;
1123
1124 mutex_lock(&irq_domain_mutex);
1125 for (i = 0; i < nr_irqs; i++)
1126 irq_domain_remove_irq(virq + i);
1127 irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
1128 mutex_unlock(&irq_domain_mutex);
1129
1130 irq_domain_free_irq_data(virq, nr_irqs);
1131 irq_free_descs(virq, nr_irqs);
1132}
1133
1134/**
1135 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1136 * @irq_base: Base IRQ number
1137 * @nr_irqs: Number of IRQs to allocate
1138 * @arg: Allocation data (arch/domain specific)
1139 *
1140 * Check whether the domain has been set up recursively. If not, allocate
1141 * through the parent domain.
1142 */
1143int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1144 unsigned int irq_base, unsigned int nr_irqs,
1145 void *arg)
1146{
1147 /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
1148 if (irq_domain_is_auto_recursive(domain))
1149 return 0;
1150
1151 domain = domain->parent;
1152 if (domain)
1153 return irq_domain_alloc_irqs_recursive(domain, irq_base,
1154 nr_irqs, arg);
1155 return -ENOSYS;
1156}
1157
1158/**
1159 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1160 * @irq_base: Base IRQ number
1161 * @nr_irqs: Number of IRQs to free
1162 *
1163 * Check whether the domain has been set up recursively. If not, free
1164 * through the parent domain.
1165 */
1166void irq_domain_free_irqs_parent(struct irq_domain *domain,
1167 unsigned int irq_base, unsigned int nr_irqs)
1168{
1169 /* irq_domain_free_irqs_recursive() will call parent's free */
1170 if (!irq_domain_is_auto_recursive(domain) && domain->parent)
1171 irq_domain_free_irqs_recursive(domain->parent, irq_base,
1172 nr_irqs);
1173}
1174
1175/**
1176 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1177 * interrupt
1178 * @irq_data: outermost irq_data associated with interrupt
1179 *
1180 * This is the second step to call domain_ops->activate to program interrupt
1181 * controllers, so the interrupt could actually get delivered.
1182 */
1183void irq_domain_activate_irq(struct irq_data *irq_data)
1184{
1185 if (irq_data && irq_data->domain) {
1186 struct irq_domain *domain = irq_data->domain;
1187
1188 if (irq_data->parent_data)
1189 irq_domain_activate_irq(irq_data->parent_data);
1190 if (domain->ops->activate)
1191 domain->ops->activate(domain, irq_data);
1192 }
1193}
1194
1195/**
1196 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1197 * deactivate interrupt
1198 * @irq_data: outermost irq_data associated with interrupt
1199 *
1200 * It calls domain_ops->deactivate to program interrupt controllers to disable
1201 * interrupt delivery.
1202 */
1203void irq_domain_deactivate_irq(struct irq_data *irq_data)
1204{
1205 if (irq_data && irq_data->domain) {
1206 struct irq_domain *domain = irq_data->domain;
1207
1208 if (domain->ops->deactivate)
1209 domain->ops->deactivate(domain, irq_data);
1210 if (irq_data->parent_data)
1211 irq_domain_deactivate_irq(irq_data->parent_data);
1212 }
1213}
1214
1215static void irq_domain_check_hierarchy(struct irq_domain *domain)
1216{
1217 /* Hierarchy irq_domains must implement callback alloc() */
1218 if (domain->ops->alloc)
1219 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1220}
1221#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1222/**
1223 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1224 * @domain: domain to match
1225 * @virq: IRQ number to get irq_data
1226 */
1227struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1228 unsigned int virq)
1229{
1230 struct irq_data *irq_data = irq_get_irq_data(virq);
1231
1232 return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1233}
1234
1235static void irq_domain_check_hierarchy(struct irq_domain *domain)
1236{
1237}
1238#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a9104b4608b..80692373abd6 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -183,6 +183,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
183 ret = chip->irq_set_affinity(data, mask, force); 183 ret = chip->irq_set_affinity(data, mask, force);
184 switch (ret) { 184 switch (ret) {
185 case IRQ_SET_MASK_OK: 185 case IRQ_SET_MASK_OK:
186 case IRQ_SET_MASK_OK_DONE:
186 cpumask_copy(data->affinity, mask); 187 cpumask_copy(data->affinity, mask);
187 case IRQ_SET_MASK_OK_NOCOPY: 188 case IRQ_SET_MASK_OK_NOCOPY:
188 irq_set_thread_affinity(desc); 189 irq_set_thread_affinity(desc);
@@ -600,6 +601,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
600 601
601 switch (ret) { 602 switch (ret) {
602 case IRQ_SET_MASK_OK: 603 case IRQ_SET_MASK_OK:
604 case IRQ_SET_MASK_OK_DONE:
603 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); 605 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
604 irqd_set(&desc->irq_data, flags); 606 irqd_set(&desc->irq_data, flags);
605 607
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
new file mode 100644
index 000000000000..3e18163f336f
--- /dev/null
+++ b/kernel/irq/msi.c
@@ -0,0 +1,330 @@
1/*
2 * linux/kernel/irq/msi.c
3 *
4 * Copyright (C) 2014 Intel Corp.
5 * Author: Jiang Liu <jiang.liu@linux.intel.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file contains common code to support Message Signalled Interrupt for
10 * PCI compatible and non PCI compatible devices.
11 */
12#include <linux/types.h>
13#include <linux/device.h>
14#include <linux/irq.h>
15#include <linux/irqdomain.h>
16#include <linux/msi.h>
17
18/* Temporary solution for building, will be removed later */
19#include <linux/pci.h>
20
21void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
22{
23 *msg = entry->msg;
24}
25
26void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
27{
28 struct msi_desc *entry = irq_get_msi_desc(irq);
29
30 __get_cached_msi_msg(entry, msg);
31}
32EXPORT_SYMBOL_GPL(get_cached_msi_msg);
33
34#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
35static inline void irq_chip_write_msi_msg(struct irq_data *data,
36 struct msi_msg *msg)
37{
38 data->chip->irq_write_msi_msg(data, msg);
39}
40
41/**
42 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
43 * @irq_data: The irq data associated to the interrupt
44 * @mask: The affinity mask to set
45 * @force: Flag to enforce setting (disable online checks)
46 *
47 * Intended to be used by MSI interrupt controllers which are
48 * implemented with hierarchical domains.
49 */
50int msi_domain_set_affinity(struct irq_data *irq_data,
51 const struct cpumask *mask, bool force)
52{
53 struct irq_data *parent = irq_data->parent_data;
54 struct msi_msg msg;
55 int ret;
56
57 ret = parent->chip->irq_set_affinity(parent, mask, force);
58 if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
59 BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
60 irq_chip_write_msi_msg(irq_data, &msg);
61 }
62
63 return ret;
64}
65
66static void msi_domain_activate(struct irq_domain *domain,
67 struct irq_data *irq_data)
68{
69 struct msi_msg msg;
70
71 BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
72 irq_chip_write_msi_msg(irq_data, &msg);
73}
74
75static void msi_domain_deactivate(struct irq_domain *domain,
76 struct irq_data *irq_data)
77{
78 struct msi_msg msg;
79
80 memset(&msg, 0, sizeof(msg));
81 irq_chip_write_msi_msg(irq_data, &msg);
82}
83
84static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
85 unsigned int nr_irqs, void *arg)
86{
87 struct msi_domain_info *info = domain->host_data;
88 struct msi_domain_ops *ops = info->ops;
89 irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
90 int i, ret;
91
92 if (irq_find_mapping(domain, hwirq) > 0)
93 return -EEXIST;
94
95 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
96 if (ret < 0)
97 return ret;
98
99 for (i = 0; i < nr_irqs; i++) {
100 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
101 if (ret < 0) {
102 if (ops->msi_free) {
103 for (i--; i > 0; i--)
104 ops->msi_free(domain, info, virq + i);
105 }
106 irq_domain_free_irqs_top(domain, virq, nr_irqs);
107 return ret;
108 }
109 }
110
111 return 0;
112}
113
114static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
115 unsigned int nr_irqs)
116{
117 struct msi_domain_info *info = domain->host_data;
118 int i;
119
120 if (info->ops->msi_free) {
121 for (i = 0; i < nr_irqs; i++)
122 info->ops->msi_free(domain, info, virq + i);
123 }
124 irq_domain_free_irqs_top(domain, virq, nr_irqs);
125}
126
127static struct irq_domain_ops msi_domain_ops = {
128 .alloc = msi_domain_alloc,
129 .free = msi_domain_free,
130 .activate = msi_domain_activate,
131 .deactivate = msi_domain_deactivate,
132};
133
134#ifdef GENERIC_MSI_DOMAIN_OPS
135static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
136 msi_alloc_info_t *arg)
137{
138 return arg->hwirq;
139}
140
141static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
142 int nvec, msi_alloc_info_t *arg)
143{
144 memset(arg, 0, sizeof(*arg));
145 return 0;
146}
147
148static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
149 struct msi_desc *desc)
150{
151 arg->desc = desc;
152}
153#else
154#define msi_domain_ops_get_hwirq NULL
155#define msi_domain_ops_prepare NULL
156#define msi_domain_ops_set_desc NULL
157#endif /* !GENERIC_MSI_DOMAIN_OPS */
158
159static int msi_domain_ops_init(struct irq_domain *domain,
160 struct msi_domain_info *info,
161 unsigned int virq, irq_hw_number_t hwirq,
162 msi_alloc_info_t *arg)
163{
164 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
165 info->chip_data);
166 if (info->handler && info->handler_name) {
167 __irq_set_handler(virq, info->handler, 0, info->handler_name);
168 if (info->handler_data)
169 irq_set_handler_data(virq, info->handler_data);
170 }
171 return 0;
172}
173
174static int msi_domain_ops_check(struct irq_domain *domain,
175 struct msi_domain_info *info,
176 struct device *dev)
177{
178 return 0;
179}
180
181static struct msi_domain_ops msi_domain_ops_default = {
182 .get_hwirq = msi_domain_ops_get_hwirq,
183 .msi_init = msi_domain_ops_init,
184 .msi_check = msi_domain_ops_check,
185 .msi_prepare = msi_domain_ops_prepare,
186 .set_desc = msi_domain_ops_set_desc,
187};
188
189static void msi_domain_update_dom_ops(struct msi_domain_info *info)
190{
191 struct msi_domain_ops *ops = info->ops;
192
193 if (ops == NULL) {
194 info->ops = &msi_domain_ops_default;
195 return;
196 }
197
198 if (ops->get_hwirq == NULL)
199 ops->get_hwirq = msi_domain_ops_default.get_hwirq;
200 if (ops->msi_init == NULL)
201 ops->msi_init = msi_domain_ops_default.msi_init;
202 if (ops->msi_check == NULL)
203 ops->msi_check = msi_domain_ops_default.msi_check;
204 if (ops->msi_prepare == NULL)
205 ops->msi_prepare = msi_domain_ops_default.msi_prepare;
206 if (ops->set_desc == NULL)
207 ops->set_desc = msi_domain_ops_default.set_desc;
208}
209
210static void msi_domain_update_chip_ops(struct msi_domain_info *info)
211{
212 struct irq_chip *chip = info->chip;
213
214 BUG_ON(!chip);
215 if (!chip->irq_mask)
216 chip->irq_mask = pci_msi_mask_irq;
217 if (!chip->irq_unmask)
218 chip->irq_unmask = pci_msi_unmask_irq;
219 if (!chip->irq_set_affinity)
220 chip->irq_set_affinity = msi_domain_set_affinity;
221}
222
223/**
224 * msi_create_irq_domain - Create a MSI interrupt domain
225 * @of_node: Optional device-tree node of the interrupt controller
226 * @info: MSI domain info
227 * @parent: Parent irq domain
228 */
229struct irq_domain *msi_create_irq_domain(struct device_node *node,
230 struct msi_domain_info *info,
231 struct irq_domain *parent)
232{
233 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
234 msi_domain_update_dom_ops(info);
235 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
236 msi_domain_update_chip_ops(info);
237
238 return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops,
239 info);
240}
241
242/**
243 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
244 * @domain: The domain to allocate from
245 * @dev: Pointer to device struct of the device for which the interrupts
246 * are allocated
247 * @nvec: The number of interrupts to allocate
248 *
249 * Returns 0 on success or an error code.
250 */
251int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
252 int nvec)
253{
254 struct msi_domain_info *info = domain->host_data;
255 struct msi_domain_ops *ops = info->ops;
256 msi_alloc_info_t arg;
257 struct msi_desc *desc;
258 int i, ret, virq = -1;
259
260 ret = ops->msi_check(domain, info, dev);
261 if (ret == 0)
262 ret = ops->msi_prepare(domain, dev, nvec, &arg);
263 if (ret)
264 return ret;
265
266 for_each_msi_entry(desc, dev) {
267 ops->set_desc(&arg, desc);
268 if (info->flags & MSI_FLAG_IDENTITY_MAP)
269 virq = (int)ops->get_hwirq(info, &arg);
270 else
271 virq = -1;
272
273 virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
274 dev_to_node(dev), &arg, false);
275 if (virq < 0) {
276 ret = -ENOSPC;
277 if (ops->handle_error)
278 ret = ops->handle_error(domain, desc, ret);
279 if (ops->msi_finish)
280 ops->msi_finish(&arg, ret);
281 return ret;
282 }
283
284 for (i = 0; i < desc->nvec_used; i++)
285 irq_set_msi_desc_off(virq, i, desc);
286 }
287
288 if (ops->msi_finish)
289 ops->msi_finish(&arg, 0);
290
291 for_each_msi_entry(desc, dev) {
292 if (desc->nvec_used == 1)
293 dev_dbg(dev, "irq %d for MSI\n", virq);
294 else
295 dev_dbg(dev, "irq [%d-%d] for MSI\n",
296 virq, virq + desc->nvec_used - 1);
297 }
298
299 return 0;
300}
301
302/**
303 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
304 * @domain: The domain managing the interrupts
305 * @dev: Pointer to device struct of the device for which the interrupts
306 * are freed
307 */
308void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
309{
310 struct msi_desc *desc;
311
312 for_each_msi_entry(desc, dev) {
313 irq_domain_free_irqs(desc->irq, desc->nvec_used);
314 desc->irq = 0;
315 }
316}
317
318/**
319 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
320 * @domain: The interrupt domain to retrieve data from
321 *
322 * Returns the pointer to the msi_domain_info stored in
323 * @domain->host_data.
324 */
325struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
326{
327 return (struct msi_domain_info *)domain->host_data;
328}
329
330#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */