aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjorn Helgaas <bhelgaas@google.com>2016-01-15 17:14:39 -0500
committerBjorn Helgaas <bhelgaas@google.com>2016-01-15 17:14:39 -0500
commit3a6384ba105ebbb5e83d040a55bfad2fa43f3048 (patch)
treecab88a093690d0a3d38303d4bd53db132b7b55ea
parent472358412b93025b13f39e46008af3ed9541ae2b (diff)
parent185a383ada2e7794b0e82e040223e741b24d2bf8 (diff)
Merge branch 'pci/host-vmd' into next
* pci/host-vmd: x86/PCI: Add driver for Intel Volume Management Device (VMD) PCI/AER: Use 32 bit PCI domain numbers x86/PCI: Allow DMA ops specific to a PCI domain irqdomain: Export irq_domain_set_info() for module use genirq/MSI: Relax msi_domain_alloc() to support parentless MSI irqdomains
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/x86/Kconfig13
-rw-r--r--arch/x86/include/asm/device.h10
-rw-r--r--arch/x86/include/asm/hw_irq.h5
-rw-r--r--arch/x86/pci/Makefile2
-rw-r--r--arch/x86/pci/common.c38
-rw-r--r--arch/x86/pci/vmd.c723
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c16
-rw-r--r--kernel/irq/irqdomain.c1
-rw-r--r--kernel/irq/msi.c8
10 files changed, 811 insertions, 11 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 9ea78171b43c..94544db33000 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8216,6 +8216,12 @@ S: Maintained
8216F: Documentation/devicetree/bindings/pci/host-generic-pci.txt 8216F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
8217F: drivers/pci/host/pci-host-generic.c 8217F: drivers/pci/host/pci-host-generic.c
8218 8218
8219PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
8220M: Keith Busch <keith.busch@intel.com>
8221L: linux-pci@vger.kernel.org
8222S: Supported
8223F: arch/x86/pci/vmd.c
8224
8219PCIE DRIVER FOR ST SPEAR13XX 8225PCIE DRIVER FOR ST SPEAR13XX
8220M: Pratyush Anand <pratyush.anand@gmail.com> 8226M: Pratyush Anand <pratyush.anand@gmail.com>
8221L: linux-pci@vger.kernel.org 8227L: linux-pci@vger.kernel.org
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index db3622f22b61..3e6aca822295 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2665,6 +2665,19 @@ config PMC_ATOM
2665 def_bool y 2665 def_bool y
2666 depends on PCI 2666 depends on PCI
2667 2667
2668config VMD
2669 depends on PCI_MSI
2670 tristate "Volume Management Device Driver"
2671 default n
2672 ---help---
2673 Adds support for the Intel Volume Management Device (VMD). VMD is a
2674 secondary PCI host bridge that allows PCI Express root ports,
2675 and devices attached to them, to be removed from the default
2676 PCI domain and placed within the VMD domain. This provides
2677 more bus resources than are otherwise possible with a
2678 single domain. If you know your system provides one of these and
2679 has devices attached to it, say Y; if you are not sure, say N.
2680
2668source "net/Kconfig" 2681source "net/Kconfig"
2669 2682
2670source "drivers/Kconfig" 2683source "drivers/Kconfig"
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 03dd72957d2f..684ed6c3aa67 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -10,6 +10,16 @@ struct dev_archdata {
10#endif 10#endif
11}; 11};
12 12
13#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
14struct dma_domain {
15 struct list_head node;
16 struct dma_map_ops *dma_ops;
17 int domain_nr;
18};
19void add_dma_domain(struct dma_domain *domain);
20void del_dma_domain(struct dma_domain *domain);
21#endif
22
13struct pdev_archdata { 23struct pdev_archdata {
14}; 24};
15 25
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 1e3408e88604..1815b736269d 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -130,6 +130,11 @@ struct irq_alloc_info {
130 char *uv_name; 130 char *uv_name;
131 }; 131 };
132#endif 132#endif
133#if IS_ENABLED(CONFIG_VMD)
134 struct {
135 struct msi_desc *desc;
136 };
137#endif
133 }; 138 };
134}; 139};
135 140
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 5c6fc3577a49..97062a635b77 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -23,6 +23,8 @@ obj-y += bus_numa.o
23obj-$(CONFIG_AMD_NB) += amd_bus.o 23obj-$(CONFIG_AMD_NB) += amd_bus.o
24obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o 24obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
25 25
26obj-$(CONFIG_VMD) += vmd.o
27
26ifeq ($(CONFIG_PCI_DEBUG),y) 28ifeq ($(CONFIG_PCI_DEBUG),y)
27EXTRA_CFLAGS += -DDEBUG 29EXTRA_CFLAGS += -DDEBUG
28endif 30endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index eccd4d99e6a4..2879efc73a96 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -641,6 +641,43 @@ unsigned int pcibios_assign_all_busses(void)
641 return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0; 641 return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
642} 642}
643 643
644#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
645static LIST_HEAD(dma_domain_list);
646static DEFINE_SPINLOCK(dma_domain_list_lock);
647
648void add_dma_domain(struct dma_domain *domain)
649{
650 spin_lock(&dma_domain_list_lock);
651 list_add(&domain->node, &dma_domain_list);
652 spin_unlock(&dma_domain_list_lock);
653}
654EXPORT_SYMBOL_GPL(add_dma_domain);
655
656void del_dma_domain(struct dma_domain *domain)
657{
658 spin_lock(&dma_domain_list_lock);
659 list_del(&domain->node);
660 spin_unlock(&dma_domain_list_lock);
661}
662EXPORT_SYMBOL_GPL(del_dma_domain);
663
664static void set_dma_domain_ops(struct pci_dev *pdev)
665{
666 struct dma_domain *domain;
667
668 spin_lock(&dma_domain_list_lock);
669 list_for_each_entry(domain, &dma_domain_list, node) {
670 if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
671 pdev->dev.archdata.dma_ops = domain->dma_ops;
672 break;
673 }
674 }
675 spin_unlock(&dma_domain_list_lock);
676}
677#else
678static void set_dma_domain_ops(struct pci_dev *pdev) {}
679#endif
680
644int pcibios_add_device(struct pci_dev *dev) 681int pcibios_add_device(struct pci_dev *dev)
645{ 682{
646 struct setup_data *data; 683 struct setup_data *data;
@@ -670,6 +707,7 @@ int pcibios_add_device(struct pci_dev *dev)
670 pa_data = data->next; 707 pa_data = data->next;
671 iounmap(data); 708 iounmap(data);
672 } 709 }
710 set_dma_domain_ops(dev);
673 return 0; 711 return 0;
674} 712}
675 713
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
new file mode 100644
index 000000000000..d57e48016f15
--- /dev/null
+++ b/arch/x86/pci/vmd.c
@@ -0,0 +1,723 @@
1/*
2 * Volume Management Device driver
3 * Copyright (c) 2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/msi.h>
21#include <linux/pci.h>
22#include <linux/rculist.h>
23#include <linux/rcupdate.h>
24
25#include <asm/irqdomain.h>
26#include <asm/device.h>
27#include <asm/msi.h>
28#include <asm/msidef.h>
29
30#define VMD_CFGBAR 0
31#define VMD_MEMBAR1 2
32#define VMD_MEMBAR2 4
33
34/*
35 * Lock for manipulating VMD IRQ lists.
36 */
37static DEFINE_RAW_SPINLOCK(list_lock);
38
/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node: list item for parent traversal.
 * @rcu: RCU callback item for freeing.
 * @irq: back pointer to parent.
 * @virq: the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head node;
	struct rcu_head rcu;
	struct vmd_irq_list *irq;
	unsigned int virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list: the list of irq's the VMD one demuxes to.
 * @vmd: back pointer to the owning VMD device.
 * @vmd_vector: the h/w IRQ assigned to the VMD.
 * @index: index into the VMD MSI-X table; used for message routing.
 * @count: number of child IRQs assigned to this vector; used to track
 *	   sharing.
 */
struct vmd_irq_list {
	struct list_head irq_list;
	struct vmd_dev *vmd;
	unsigned int vmd_vector;
	unsigned int index;
	unsigned int count;
};

/* Per-instance state for one VMD host bridge endpoint. */
struct vmd_dev {
	struct pci_dev *dev;		/* the VMD endpoint itself */

	spinlock_t cfg_lock;		/* serializes child config accesses */
	char __iomem *cfgbar;		/* mapped CFGBAR (child ECAM space) */

	int msix_count;			/* vectors actually granted */
	struct msix_entry *msix_entries;
	struct vmd_irq_list *irqs;	/* one demux list per MSI-X vector */

	struct pci_sysdata sysdata;
	struct resource resources[3];	/* bus range + two memory windows */
	struct irq_domain *irq_domain;
	struct pci_bus *bus;		/* root bus of the VMD domain */

#ifdef CONFIG_X86_DEV_DMA_OPS
	struct dma_map_ops dma_ops;
	struct dma_domain dma_domain;
#endif
};
92
/* Map a bus inside a VMD domain back to its owning vmd_dev via sysdata. */
static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}
97
98/*
99 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
100 * but the MSI entry for the hardware it's driving will be programmed with a
101 * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
102 * domain into one of its own, and the VMD driver de-muxes these for the
103 * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
104 * and irq_chip to set this up.
105 */
106static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
107{
108 struct vmd_irq *vmdirq = data->chip_data;
109 struct vmd_irq_list *irq = vmdirq->irq;
110
111 msg->address_hi = MSI_ADDR_BASE_HI;
112 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index);
113 msg->data = 0;
114}
115
116/*
117 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
118 */
119static void vmd_irq_enable(struct irq_data *data)
120{
121 struct vmd_irq *vmdirq = data->chip_data;
122
123 raw_spin_lock(&list_lock);
124 list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
125 raw_spin_unlock(&list_lock);
126
127 data->chip->irq_unmask(data);
128}
129
130static void vmd_irq_disable(struct irq_data *data)
131{
132 struct vmd_irq *vmdirq = data->chip_data;
133
134 data->chip->irq_mask(data);
135
136 raw_spin_lock(&list_lock);
137 list_del_rcu(&vmdirq->node);
138 raw_spin_unlock(&list_lock);
139}
140
141/*
142 * XXX: Stubbed until we develop acceptable way to not create conflicts with
143 * other devices sharing the same vector.
144 */
145static int vmd_irq_set_affinity(struct irq_data *data,
146 const struct cpumask *dest, bool force)
147{
148 return -EINVAL;
149}
150
/* irq_chip for child MSIs; mask/unmask come from the default chip ops. */
static struct irq_chip vmd_msi_controller = {
	.name = "VMD-MSI",
	.irq_enable = vmd_irq_enable,
	.irq_disable = vmd_irq_disable,
	.irq_compose_msi_msg = vmd_compose_msi_msg,
	.irq_set_affinity = vmd_irq_set_affinity,
};
158
159static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
160 msi_alloc_info_t *arg)
161{
162 return 0;
163}
164
165/*
166 * XXX: We can be even smarter selecting the best IRQ once we solve the
167 * affinity problem.
168 */
169static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd)
170{
171 int i, best = 0;
172
173 raw_spin_lock(&list_lock);
174 for (i = 1; i < vmd->msix_count; i++)
175 if (vmd->irqs[i].count < vmd->irqs[best].count)
176 best = i;
177 vmd->irqs[best].count++;
178 raw_spin_unlock(&list_lock);
179
180 return &vmd->irqs[best];
181}
182
183static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
184 unsigned int virq, irq_hw_number_t hwirq,
185 msi_alloc_info_t *arg)
186{
187 struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus);
188 struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
189
190 if (!vmdirq)
191 return -ENOMEM;
192
193 INIT_LIST_HEAD(&vmdirq->node);
194 vmdirq->irq = vmd_next_irq(vmd);
195 vmdirq->virq = virq;
196
197 irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
198 vmdirq, handle_simple_irq, vmd, NULL);
199 return 0;
200}
201
202static void vmd_msi_free(struct irq_domain *domain,
203 struct msi_domain_info *info, unsigned int virq)
204{
205 struct vmd_irq *vmdirq = irq_get_chip_data(virq);
206
207 /* XXX: Potential optimization to rebalance */
208 raw_spin_lock(&list_lock);
209 vmdirq->irq->count--;
210 raw_spin_unlock(&list_lock);
211
212 kfree_rcu(vmdirq, rcu);
213}
214
215static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
216 int nvec, msi_alloc_info_t *arg)
217{
218 struct pci_dev *pdev = to_pci_dev(dev);
219 struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
220
221 if (nvec > vmd->msix_count)
222 return vmd->msix_count;
223
224 memset(arg, 0, sizeof(*arg));
225 return 0;
226}
227
228static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
229{
230 arg->desc = desc;
231}
232
/* Hooks wiring the generic MSI layer to the VMD-specific helpers above. */
static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq = vmd_get_hwirq,
	.msi_init = vmd_msi_init,
	.msi_free = vmd_msi_free,
	.msi_prepare = vmd_msi_prepare,
	.set_desc = vmd_set_desc,
};

/* MSI-X-only domain; default dom/chip ops fill in the unset callbacks. */
static struct msi_domain_info vmd_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_PCI_MSIX,
	.ops = &vmd_msi_domain_ops,
	.chip = &vmd_msi_controller,
};
247
#ifdef CONFIG_X86_DEV_DMA_OPS
/*
 * VMD replaces the requester ID with its own. DMA mappings for devices in a
 * VMD domain need to be mapped for the VMD, not the device requiring
 * the mapping.
 *
 * Each wrapper below simply forwards the operation to the VMD endpoint's
 * own dma_map_ops, substituting the VMD device for the child device.
 */
static struct device *to_vmd_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	return &vmd->dev->dev;
}

/* The ops the VMD endpoint itself uses; what every wrapper delegates to. */
static struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return to_vmd_dev(dev)->archdata.dma_ops;
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		       gfp_t flag, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
				       attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t addr, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
				      attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t addr, size_t size,
		    struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
				      size, attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t addr, size_t size,
			   struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
					     addr, size, attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
					  dir, attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_attrs *attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
				       size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
						 dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
#endif
365
static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
	struct dma_domain *domain = &vmd->dma_domain;

	/*
	 * Mirror vmd_setup_dma_ops(): the domain was only registered when
	 * the VMD device itself had archdata dma_ops.
	 */
	if (vmd->dev->dev.archdata.dma_ops)
		del_dma_domain(domain);
}

/* Copy op 'fn' from source to dest only when the source implements it. */
#define ASSIGN_VMD_DMA_OPS(source, dest, fn) \
	do { \
		if (source->fn) \
			dest->fn = vmd_##fn; \
	} while (0)

/*
 * Build the VMD's dma_map_ops from whatever ops the VMD endpoint itself
 * uses, then register them against this domain number so child-device
 * DMA mappings are made for the VMD (the actual requester).
 */
static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
	const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops;
	struct dma_map_ops *dest = &vmd->dma_ops;
	struct dma_domain *domain = &vmd->dma_domain;

	domain->domain_nr = vmd->sysdata.domain;
	domain->dma_ops = dest;

	/* No ops on the VMD itself: nothing to forward, don't register. */
	if (!source)
		return;
	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
	ASSIGN_VMD_DMA_OPS(source, dest, free);
	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
#endif
	add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
#else
static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
#endif
415
416static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
417 unsigned int devfn, int reg, int len)
418{
419 char __iomem *addr = vmd->cfgbar +
420 (bus->number << 20) + (devfn << 12) + reg;
421
422 if ((addr - vmd->cfgbar) + len >=
423 resource_size(&vmd->dev->resource[VMD_CFGBAR]))
424 return NULL;
425
426 return addr;
427}
428
429/*
430 * CPU may deadlock if config space is not serialized on some versions of this
431 * hardware, so all config space access is done under a spinlock.
432 */
433static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
434 int len, u32 *value)
435{
436 struct vmd_dev *vmd = vmd_from_bus(bus);
437 char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
438 unsigned long flags;
439 int ret = 0;
440
441 if (!addr)
442 return -EFAULT;
443
444 spin_lock_irqsave(&vmd->cfg_lock, flags);
445 switch (len) {
446 case 1:
447 *value = readb(addr);
448 break;
449 case 2:
450 *value = readw(addr);
451 break;
452 case 4:
453 *value = readl(addr);
454 break;
455 default:
456 ret = -EINVAL;
457 break;
458 }
459 spin_unlock_irqrestore(&vmd->cfg_lock, flags);
460 return ret;
461}
462
463/*
464 * VMD h/w converts non-posted config writes to posted memory writes. The
465 * read-back in this function forces the completion so it returns only after
466 * the config space was written, as expected.
467 */
468static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
469 int len, u32 value)
470{
471 struct vmd_dev *vmd = vmd_from_bus(bus);
472 char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
473 unsigned long flags;
474 int ret = 0;
475
476 if (!addr)
477 return -EFAULT;
478
479 spin_lock_irqsave(&vmd->cfg_lock, flags);
480 switch (len) {
481 case 1:
482 writeb(value, addr);
483 readb(addr);
484 break;
485 case 2:
486 writew(value, addr);
487 readw(addr);
488 break;
489 case 4:
490 writel(value, addr);
491 readl(addr);
492 break;
493 default:
494 ret = -EINVAL;
495 break;
496 }
497 spin_unlock_irqrestore(&vmd->cfg_lock, flags);
498 return ret;
499}
500
/* Config accessors for buses behind the VMD. */
static struct pci_ops vmd_ops = {
	.read = vmd_pci_read,
	.write = vmd_pci_write,
};
505
506/*
507 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
508 */
509static int vmd_find_free_domain(void)
510{
511 int domain = 0xffff;
512 struct pci_bus *bus = NULL;
513
514 while ((bus = pci_find_next_bus(bus)) != NULL)
515 domain = max_t(int, domain, pci_domain_nr(bus));
516 return domain + 1;
517}
518
/*
 * Bring up the PCI domain owned by this VMD: carve child resources out of
 * the VMD's BARs, pick a free domain number, create the MSI irq_domain,
 * and enumerate the root bus behind the device.
 */
static int vmd_enable_domain(struct vmd_dev *vmd)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);

	/* CFGBAR is ECAM at 1MB per bus, so its size bounds the bus range. */
	res = &vmd->dev->resource[VMD_CFGBAR];
	/*
	 * NOTE(review): a bus-number resource normally starts at 0, but
	 * .start here inherits the BAR's physical address — confirm intent.
	 */
	vmd->resources[0] = (struct resource) {
		.name = "VMD CFGBAR",
		.start = res->start,
		.end = (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)		/* window fits below 4GB */
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name = "VMD MEMBAR1",
		.start = res->start,
		.end = res->end,
		.flags = flags,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	/*
	 * NOTE(review): the first 0x2000 of MEMBAR2 is withheld from the
	 * children — presumably reserved for the VMD's own use; confirm
	 * against the device specification.
	 */
	vmd->resources[2] = (struct resource) {
		.name = "VMD MEMBAR2",
		.start = res->start + 0x2000,
		.end = res->end,
		.flags = flags,
	};

	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	/* Parentless MSI domain: the VMD demuxes its own vectors. */
	vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
						    NULL);
	if (!vmd->irq_domain)
		return -ENODEV;

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource(&resources, &vmd->resources[1]);
	pci_add_resource(&resources, &vmd->resources[2]);
	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
				       &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		return -ENODEV;
	}

	vmd_setup_dma_ops(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	pci_rescan_bus(vmd->bus);

	/* Best-effort convenience link; failure is only worth a warning. */
	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}
589
590static irqreturn_t vmd_irq(int irq, void *data)
591{
592 struct vmd_irq_list *irqs = data;
593 struct vmd_irq *vmdirq;
594
595 rcu_read_lock();
596 list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
597 generic_handle_irq(vmdirq->virq);
598 rcu_read_unlock();
599
600 return IRQ_HANDLED;
601}
602
/*
 * Bind to a VMD endpoint: map its ECAM space, set up MSI-X demuxing, and
 * enable the child PCI domain. All allocations and IRQ registrations are
 * devm-managed, so error paths simply return.
 */
static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	/* CFGBAR must hold at least one bus worth (1MB) of ECAM space. */
	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count,
					 sizeof(*vmd->msix_entries),
					 GFP_KERNEL);
	if (!vmd->msix_entries)
		return -ENOMEM;
	for (i = 0; i < vmd->msix_count; i++)
		vmd->msix_entries[i].entry = i;

	/* May be granted fewer vectors than requested; use what we get. */
	vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 1,
						vmd->msix_count);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	/* One demux handler per granted MSI-X vector. */
	for (i = 0; i < vmd->msix_count; i++) {
		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector;
		vmd->irqs[i].index = i;

		err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector,
				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}
672
/*
 * Unbind: tear down the child domain before removing the IRQ and DMA
 * plumbing it depends on. devm handles the per-vector IRQs and memory.
 */
static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	pci_set_drvdata(dev, NULL);
	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_teardown_dma_ops(vmd);
	irq_domain_remove(vmd->irq_domain);
}
684
#ifdef CONFIG_PM
/* Save the VMD endpoint's PCI config state across suspend. */
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
703
/* Intel VMD device ID table. */
static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name = "vmd",
	.id_table = vmd_ids,
	.probe = vmd_probe,
	.remove = vmd_remove,
	.driver = {
		.pm = &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 182224acedbe..20db790465dd 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -41,12 +41,12 @@ struct aer_error_inj {
41 u32 header_log1; 41 u32 header_log1;
42 u32 header_log2; 42 u32 header_log2;
43 u32 header_log3; 43 u32 header_log3;
44 u16 domain; 44 u32 domain;
45}; 45};
46 46
47struct aer_error { 47struct aer_error {
48 struct list_head list; 48 struct list_head list;
49 u16 domain; 49 u32 domain;
50 unsigned int bus; 50 unsigned int bus;
51 unsigned int devfn; 51 unsigned int devfn;
52 int pos_cap_err; 52 int pos_cap_err;
@@ -74,7 +74,7 @@ static LIST_HEAD(pci_bus_ops_list);
74/* Protect einjected and pci_bus_ops_list */ 74/* Protect einjected and pci_bus_ops_list */
75static DEFINE_SPINLOCK(inject_lock); 75static DEFINE_SPINLOCK(inject_lock);
76 76
77static void aer_error_init(struct aer_error *err, u16 domain, 77static void aer_error_init(struct aer_error *err, u32 domain,
78 unsigned int bus, unsigned int devfn, 78 unsigned int bus, unsigned int devfn,
79 int pos_cap_err) 79 int pos_cap_err)
80{ 80{
@@ -86,7 +86,7 @@ static void aer_error_init(struct aer_error *err, u16 domain,
86} 86}
87 87
88/* inject_lock must be held before calling */ 88/* inject_lock must be held before calling */
89static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, 89static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
90 unsigned int devfn) 90 unsigned int devfn)
91{ 91{
92 struct aer_error *err; 92 struct aer_error *err;
@@ -106,7 +106,7 @@ static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
106 int domain = pci_domain_nr(dev->bus); 106 int domain = pci_domain_nr(dev->bus);
107 if (domain < 0) 107 if (domain < 0)
108 return NULL; 108 return NULL;
109 return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); 109 return __find_aer_error(domain, dev->bus->number, dev->devfn);
110} 110}
111 111
112/* inject_lock must be held before calling */ 112/* inject_lock must be held before calling */
@@ -196,7 +196,7 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
196 domain = pci_domain_nr(bus); 196 domain = pci_domain_nr(bus);
197 if (domain < 0) 197 if (domain < 0)
198 goto out; 198 goto out;
199 err = __find_aer_error((u16)domain, bus->number, devfn); 199 err = __find_aer_error(domain, bus->number, devfn);
200 if (!err) 200 if (!err)
201 goto out; 201 goto out;
202 202
@@ -228,7 +228,7 @@ static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
228 domain = pci_domain_nr(bus); 228 domain = pci_domain_nr(bus);
229 if (domain < 0) 229 if (domain < 0)
230 goto out; 230 goto out;
231 err = __find_aer_error((u16)domain, bus->number, devfn); 231 err = __find_aer_error(domain, bus->number, devfn);
232 if (!err) 232 if (!err)
233 goto out; 233 goto out;
234 234
@@ -329,7 +329,7 @@ static int aer_inject(struct aer_error_inj *einj)
329 u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0; 329 u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
330 int ret = 0; 330 int ret = 0;
331 331
332 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); 332 dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn);
333 if (!dev) 333 if (!dev)
334 return -ENODEV; 334 return -ENODEV;
335 rpdev = pcie_find_root_port(dev); 335 rpdev = pcie_find_root_port(dev);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 22aa9612ef7c..ca05cc841f00 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1058,6 +1058,7 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1058 __irq_set_handler(virq, handler, 0, handler_name); 1058 __irq_set_handler(virq, handler, 0, handler_name);
1059 irq_set_handler_data(virq, handler_data); 1059 irq_set_handler_data(virq, handler_data);
1060} 1060}
1061EXPORT_SYMBOL(irq_domain_set_info);
1061 1062
1062/** 1063/**
1063 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data 1064 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 6b0c0b74a2a1..5e15cb4b2f00 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -109,9 +109,11 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
109 if (irq_find_mapping(domain, hwirq) > 0) 109 if (irq_find_mapping(domain, hwirq) > 0)
110 return -EEXIST; 110 return -EEXIST;
111 111
112 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 112 if (domain->parent) {
113 if (ret < 0) 113 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
114 return ret; 114 if (ret < 0)
115 return ret;
116 }
115 117
116 for (i = 0; i < nr_irqs; i++) { 118 for (i = 0; i < nr_irqs; i++) {
117 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); 119 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);