-rw-r--r--Documentation/DMA-API-HOWTO.txt29
-rw-r--r--Documentation/DMA-API.txt30
-rw-r--r--Documentation/devicetree/bindings/pci/xgene-pci-msi.txt68
-rw-r--r--MAINTAINERS8
-rw-r--r--arch/alpha/include/asm/pci.h16
-rw-r--r--arch/alpha/kernel/core_irongate.c1
-rw-r--r--arch/alpha/kernel/sys_eiger.c1
-rw-r--r--arch/alpha/kernel/sys_nautilus.c1
-rw-r--r--arch/arm/include/asm/pci.h10
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi27
-rw-r--r--arch/frv/include/asm/pci.h10
-rw-r--r--arch/ia64/include/asm/pci.h32
-rw-r--r--arch/microblaze/include/asm/pci.h23
-rw-r--r--arch/mips/include/asm/pci.h10
-rw-r--r--arch/mips/pci/fixup-cobalt.c1
-rw-r--r--arch/mips/pci/ops-mace.c1
-rw-r--r--arch/mips/pci/pci-lantiq.c1
-rw-r--r--arch/mn10300/include/asm/pci.h13
-rw-r--r--arch/parisc/include/asm/pci.h19
-rw-r--r--arch/powerpc/include/asm/pci.h30
-rw-r--r--arch/powerpc/kernel/prom.c1
-rw-r--r--arch/powerpc/kernel/prom_init.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pci.c2
-rw-r--r--arch/s390/kernel/suspend.c2
-rw-r--r--arch/sh/drivers/pci/ops-sh5.c1
-rw-r--r--arch/sh/drivers/pci/pci-sh5.c1
-rw-r--r--arch/sh/include/asm/pci.h18
-rw-r--r--arch/sparc/include/asm/pci_32.h10
-rw-r--r--arch/sparc/include/asm/pci_64.h19
-rw-r--r--arch/unicore32/include/asm/pci.h10
-rw-r--r--arch/x86/include/asm/pci.h7
-rw-r--r--arch/x86/kernel/x86_init.c1
-rw-r--r--arch/x86/pci/acpi.c17
-rw-r--r--drivers/acpi/pci_irq.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c1
-rw-r--r--drivers/ntb/ntb_hw.c2
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/pci/Kconfig4
-rw-r--r--drivers/pci/bus.c10
-rw-r--r--drivers/pci/host/Kconfig20
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-dra7xx.c19
-rw-r--r--drivers/pci/host/pci-exynos.c34
-rw-r--r--drivers/pci/host/pci-imx6.c88
-rw-r--r--drivers/pci/host/pci-keystone.c16
-rw-r--r--drivers/pci/host/pci-layerscape.c25
-rw-r--r--drivers/pci/host/pci-mvebu.c18
-rw-r--r--drivers/pci/host/pci-tegra.c16
-rw-r--r--drivers/pci/host/pci-xgene-msi.c596
-rw-r--r--drivers/pci/host/pci-xgene.c66
-rw-r--r--drivers/pci/host/pcie-designware.c154
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c110
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c12
-rw-r--r--drivers/pci/host/pcie-iproc.c6
-rw-r--r--drivers/pci/host/pcie-iproc.h4
-rw-r--r--drivers/pci/host/pcie-spear13xx.c17
-rw-r--r--drivers/pci/hotplug/Makefile3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c5
-rw-r--r--drivers/pci/hotplug/pciehp.h23
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c137
-rw-r--r--drivers/pci/hotplug/pciehp_core.c54
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c154
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c145
-rw-r--r--drivers/pci/msi.c53
-rw-r--r--drivers/pci/pci.c44
-rw-r--r--drivers/pci/pci.h32
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c3
-rw-r--r--drivers/pci/pcie/aspm.c57
-rw-r--r--drivers/pci/probe.c69
-rw-r--r--drivers/pci/quirks.c6
-rw-r--r--drivers/pci/vc.c3
-rw-r--r--drivers/pci/xen-pcifront.c16
-rw-r--r--drivers/virtio/virtio_pci_common.c3
-rw-r--r--include/asm-generic/pci.h13
-rw-r--r--include/linux/pci.h44
-rw-r--r--include/linux/types.h12
76 files changed, 1396 insertions, 1125 deletions
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 0f7afb2bb442..aef8cc5a677b 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -25,13 +25,18 @@ physical addresses. These are the addresses in /proc/iomem. The physical
 address is not directly useful to a driver; it must use ioremap() to map
 the space and produce a virtual address.
 
-I/O devices use a third kind of address: a "bus address" or "DMA address".
-If a device has registers at an MMIO address, or if it performs DMA to read
-or write system memory, the addresses used by the device are bus addresses.
-In some systems, bus addresses are identical to CPU physical addresses, but
-in general they are not. IOMMUs and host bridges can produce arbitrary
+I/O devices use a third kind of address: a "bus address". If a device has
+registers at an MMIO address, or if it performs DMA to read or write system
+memory, the addresses used by the device are bus addresses. In some
+systems, bus addresses are identical to CPU physical addresses, but in
+general they are not. IOMMUs and host bridges can produce arbitrary
 mappings between physical and bus addresses.
 
+From a device's point of view, DMA uses the bus address space, but it may
+be restricted to a subset of that space. For example, even if a system
+supports 64-bit addresses for main memory and PCI BARs, it may use an IOMMU
+so devices only need to use 32-bit DMA addresses.
+
 Here's a picture and some examples:
 
               CPU                  CPU                  Bus
@@ -72,11 +77,11 @@ can use virtual address X to access the buffer, but the device itself
 cannot because DMA doesn't go through the CPU virtual memory system.
 
 In some simple systems, the device can do DMA directly to physical address
-Y. But in many others, there is IOMMU hardware that translates bus
+Y. But in many others, there is IOMMU hardware that translates DMA
 addresses to physical addresses, e.g., it translates Z to Y. This is part
 of the reason for the DMA API: the driver can give a virtual address X to
 an interface like dma_map_single(), which sets up any required IOMMU
-mapping and returns the bus address Z. The driver then tells the device to
+mapping and returns the DMA address Z. The driver then tells the device to
 do DMA to Z, and the IOMMU maps it to the buffer at address Y in system
 RAM.
 
@@ -98,7 +103,7 @@ First of all, you should make sure
 #include <linux/dma-mapping.h>
 
 is in your driver, which provides the definition of dma_addr_t. This type
-can hold any valid DMA or bus address for the platform and should be used
+can hold any valid DMA address for the platform and should be used
 everywhere you hold a DMA address returned from the DMA mapping functions.
 
                         What memory is DMA'able?
@@ -316,7 +321,7 @@ There are two types of DMA mappings:
   Think of "consistent" as "synchronous" or "coherent".
 
   The current default is to return consistent memory in the low 32
-  bits of the bus space. However, for future compatibility you should
+  bits of the DMA space. However, for future compatibility you should
   set the consistent mask even if this default is fine for your
   driver.
 
@@ -403,7 +408,7 @@ dma_alloc_coherent() returns two values: the virtual address which you
 can use to access it from the CPU and dma_handle which you pass to the
 card.
 
-The CPU virtual address and the DMA bus address are both
+The CPU virtual address and the DMA address are both
 guaranteed to be aligned to the smallest PAGE_SIZE order which
 is greater than or equal to the requested size. This invariant
 exists (for example) to guarantee that if you allocate a chunk
@@ -645,8 +650,8 @@ PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be
              dma_map_sg call.
 
 Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}()
-counterpart, because the bus address space is a shared resource and
-you could render the machine unusable by consuming all bus addresses.
+counterpart, because the DMA address space is a shared resource and
+you could render the machine unusable by consuming all DMA addresses.
 
 If you need to use the same streaming DMA region multiple times and touch
 the data in between the DMA transfers, the buffer needs to be synced
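
The hunks above only rename "bus address" to "DMA address"; the API itself is
unchanged. As a minimal illustration (not part of this patch, and with a
hypothetical device register write), a driver still obtains a dma_addr_t from
dma_map_single() and hands that value, never a CPU address, to its hardware:

/* Sketch only: "mydev_dma_example" and the commented register write are
 * hypothetical; the DMA API calls are the real ones documented above. */
#include <linux/dma-mapping.h>

static int mydev_dma_example(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma_handle;  /* holds a DMA address, not a CPU address */

        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                return -EIO;    /* platform cannot do 32-bit DMA */

        dma_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_handle))
                return -ENOMEM;

        /* program the device with the DMA address (device-specific):
         * mydev_write_reg(dev, MYDEV_DMA_ADDR, dma_handle); */

        dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);
        return 0;
}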
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 52088408668a..7eba542eff7c 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -18,10 +18,10 @@ Part I - dma_ API
 To get the dma_ API, you must #include <linux/dma-mapping.h>. This
 provides dma_addr_t and the interfaces described below.
 
-A dma_addr_t can hold any valid DMA or bus address for the platform. It
-can be given to a device to use as a DMA source or target. A CPU cannot
-reference a dma_addr_t directly because there may be translation between
-its physical address space and the bus address space.
+A dma_addr_t can hold any valid DMA address for the platform. It can be
+given to a device to use as a DMA source or target. A CPU cannot reference
+a dma_addr_t directly because there may be translation between its physical
+address space and the DMA address space.
 
 Part Ia - Using large DMA-coherent buffers
 ------------------------------------------
@@ -42,7 +42,7 @@ It returns a pointer to the allocated region (in the processor's virtual
 address space) or NULL if the allocation failed.
 
 It also returns a <dma_handle> which may be cast to an unsigned integer the
-same width as the bus and given to the device as the bus address base of
+same width as the bus and given to the device as the DMA address base of
 the region.
 
 Note: consistent memory can be expensive on some platforms, and the
@@ -193,7 +193,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                enum dma_data_direction direction)
 
 Maps a piece of processor virtual memory so it can be accessed by the
-device and returns the bus address of the memory.
+device and returns the DMA address of the memory.
 
 The direction for both APIs may be converted freely by casting.
 However the dma_ API uses a strongly typed enumerator for its
@@ -212,20 +212,20 @@ contiguous piece of memory. For this reason, memory to be mapped by
 this API should be obtained from sources which guarantee it to be
 physically contiguous (like kmalloc).
 
-Further, the bus address of the memory must be within the
+Further, the DMA address of the memory must be within the
 dma_mask of the device (the dma_mask is a bit mask of the
-addressable region for the device, i.e., if the bus address of
-the memory ANDed with the dma_mask is still equal to the bus
+addressable region for the device, i.e., if the DMA address of
+the memory ANDed with the dma_mask is still equal to the DMA
 address, then the device can perform DMA to the memory). To
 ensure that the memory allocated by kmalloc is within the dma_mask,
 the driver may specify various platform-dependent flags to restrict
-the bus address range of the allocation (e.g., on x86, GFP_DMA
-guarantees to be within the first 16MB of available bus addresses,
+the DMA address range of the allocation (e.g., on x86, GFP_DMA
+guarantees to be within the first 16MB of available DMA addresses,
 as required by ISA devices).
 
 Note also that the above constraints on physical contiguity and
 dma_mask may not apply if the platform has an IOMMU (a device which
-maps an I/O bus address to a physical memory address). However, to be
+maps an I/O DMA address to a physical memory address). However, to be
 portable, device driver writers may *not* assume that such an IOMMU
 exists.
 
@@ -296,7 +296,7 @@ reduce current DMA mapping usage or delay and try again later).
        dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction direction)
 
-Returns: the number of bus address segments mapped (this may be shorter
+Returns: the number of DMA address segments mapped (this may be shorter
 than <nents> passed in if some elements of the scatter/gather list are
 physically or virtually adjacent and an IOMMU maps them with a single
 entry).
@@ -340,7 +340,7 @@ must be the same as those and passed in to the scatter/gather mapping
 API.
 
 Note: <nents> must be the number you passed in, *not* the number of
-bus address entries returned.
+DMA address entries returned.
 
 void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
@@ -507,7 +507,7 @@ it's asked for coherent memory for this device.
 phys_addr is the CPU physical address to which the memory is currently
 assigned (this will be ioremapped so the CPU can access the region).
 
-device_addr is the bus address the device needs to be programmed
+device_addr is the DMA address the device needs to be programmed
 with to actually address this memory (this will be handed out as the
 dma_addr_t in dma_alloc_coherent()).
 
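
As with the DMA-API-HOWTO changes above, these hunks are terminology fixes
only. A minimal sketch (not from this patch) of the dma_alloc_coherent()
pattern the text describes, where the CPU uses the returned virtual address
and only the dma_addr_t is handed to the device:

/* Sketch only: "mydev_alloc_ring" is a hypothetical helper name. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *mydev_alloc_ring(struct device *dev, size_t size,
                              dma_addr_t *dma_handle)
{
        void *cpu_addr;

        cpu_addr = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
        if (!cpu_addr)
                return NULL;

        /* CPU accesses cpu_addr; the device is programmed with *dma_handle. */
        return cpu_addr;
}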
diff --git a/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt b/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
new file mode 100644
index 000000000000..36d881c8e6d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
@@ -0,0 +1,68 @@
+* AppliedMicro X-Gene v1 PCIe MSI controller
+
+Required properties:
+
+- compatible: should be "apm,xgene1-msi" to identify
+              X-Gene v1 PCIe MSI controller block.
+- msi-controller: indicates that this is X-Gene v1 PCIe MSI controller node
+- reg: physical base address (0x79000000) and length (0x900000) for controller
+       registers. These registers include the MSI termination address and data
+       registers as well as the MSI interrupt status registers.
+- reg-names: not required
+- interrupts: A list of 16 interrupt outputs of the controller, starting from
+              interrupt number 0x10 to 0x1f.
+- interrupt-names: not required
+
+Each PCIe node needs to have property msi-parent that points to msi controller node
+
+Examples:
+
+SoC DTSI:
+
+	+ MSI node:
+	msi@79000000 {
+		compatible = "apm,xgene1-msi";
+		msi-controller;
+		reg = <0x00 0x79000000 0x0 0x900000>;
+		interrupts = 	<0x0 0x10 0x4>
+				<0x0 0x11 0x4>
+				<0x0 0x12 0x4>
+				<0x0 0x13 0x4>
+				<0x0 0x14 0x4>
+				<0x0 0x15 0x4>
+				<0x0 0x16 0x4>
+				<0x0 0x17 0x4>
+				<0x0 0x18 0x4>
+				<0x0 0x19 0x4>
+				<0x0 0x1a 0x4>
+				<0x0 0x1b 0x4>
+				<0x0 0x1c 0x4>
+				<0x0 0x1d 0x4>
+				<0x0 0x1e 0x4>
+				<0x0 0x1f 0x4>;
+	};
+
+	+ PCIe controller node with msi-parent property pointing to MSI node:
+	pcie0: pcie@1f2b0000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		reg = < 0x00 0x1f2b0000 0x0 0x00010000   /* Controller registers */
+			0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
+		reg-names = "csr", "cfg";
+		ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000   /* io */
+			  0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
+			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
+		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
+				 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
+				 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
+				 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+		dma-coherent;
+		clocks = <&pcie0clk 0>;
+		msi-parent= <&msi>;
+	};
diff --git a/MAINTAINERS b/MAINTAINERS
index 9eb01405313c..663bc8ed1860 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7611,6 +7611,14 @@ L: linux-pci@vger.kernel.org
 S:	Maintained
 F:	drivers/pci/host/*spear*
 
+PCI MSI DRIVER FOR APPLIEDMICRO XGENE
+M:	Duc Dang <dhdang@apm.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
+F:	drivers/pci/host/pci-xgene-msi.c
+
 PCMCIA SUBSYSTEM
 P:	Linux PCMCIA Team
 L:	linux-pcmcia@lists.infradead.org
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index f7f680f7457d..8b02afeb6319 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -71,22 +71,6 @@ extern void pcibios_set_master(struct pci_dev *dev);
 /* implement the pci_ DMA API in terms of the generic device dma_ one */
 #include <asm-generic/pci-dma-compat.h>
 
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	unsigned long cacheline_size;
-	u8 byte;
-
-	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
-	if (byte == 0)
-		cacheline_size = 1024;
-	else
-		cacheline_size = (int) byte * 4;
-
-	*strat = PCI_DMA_BURST_BOUNDARY;
-	*strategy_parameter = cacheline_size;
-}
 #endif
 
 /* TODO: integrate with include/asm-generic/pci.h ? */
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index 00096df0f6ad..83d0a359a1b2 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -22,7 +22,6 @@
 #include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
-#include <asm/pci.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 79d69d7f63f8..15f42083bdb3 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -22,7 +22,6 @@
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/pgtable.h>
 #include <asm/core_tsunami.h>
 #include <asm/hwrpb.h>
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 700686d04869..2cfaa0e5c577 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -39,7 +39,6 @@
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/pgtable.h>
 #include <asm/core_irongate.h>
 #include <asm/hwrpb.h>
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 585dc33a7a24..a5635444ca41 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -31,16 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
  */
 #define PCI_DMA_BUS_IS_PHYS     (1)
 
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
-#endif
-
 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	enum pci_mmap_state mmap_state, int write_combine);
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index c8d3e0e86678..d8f3a1c65ecd 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -374,6 +374,28 @@
 			};
 		};
 
+		msi: msi@79000000 {
+			compatible = "apm,xgene1-msi";
+			msi-controller;
+			reg = <0x00 0x79000000 0x0 0x900000>;
+			interrupts = <	0x0 0x10 0x4
+					0x0 0x11 0x4
+					0x0 0x12 0x4
+					0x0 0x13 0x4
+					0x0 0x14 0x4
+					0x0 0x15 0x4
+					0x0 0x16 0x4
+					0x0 0x17 0x4
+					0x0 0x18 0x4
+					0x0 0x19 0x4
+					0x0 0x1a 0x4
+					0x0 0x1b 0x4
+					0x0 0x1c 0x4
+					0x0 0x1d 0x4
+					0x0 0x1e 0x4
+					0x0 0x1f 0x4>;
+		};
+
 		pcie0: pcie@1f2b0000 {
 			status = "disabled";
 			device_type = "pci";
@@ -395,6 +417,7 @@
 					 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
 			dma-coherent;
 			clocks = <&pcie0clk 0>;
+			msi-parent = <&msi>;
 		};
 
 		pcie1: pcie@1f2c0000 {
@@ -418,6 +441,7 @@
 					 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
 			dma-coherent;
 			clocks = <&pcie1clk 0>;
+			msi-parent = <&msi>;
 		};
 
 		pcie2: pcie@1f2d0000 {
@@ -441,6 +465,7 @@
 					 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
 			dma-coherent;
 			clocks = <&pcie2clk 0>;
+			msi-parent = <&msi>;
 		};
 
 		pcie3: pcie@1f500000 {
@@ -464,6 +489,7 @@
 					 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
 			dma-coherent;
 			clocks = <&pcie3clk 0>;
+			msi-parent = <&msi>;
 		};
 
 		pcie4: pcie@1f510000 {
@@ -487,6 +513,7 @@
 					 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
 			dma-coherent;
 			clocks = <&pcie4clk 0>;
+			msi-parent = <&msi>;
 		};
 
 		serial0: serial@1c020000 {
diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h
index 2035a4d3f9b9..a6d4ed042c70 100644
--- a/arch/frv/include/asm/pci.h
+++ b/arch/frv/include/asm/pci.h
@@ -41,16 +41,6 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
 /* Return the index of the PCI controller for device PDEV. */
 #define pci_controller_num(PDEV)	(0)
 
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
-#endif
-
 /*
  * These are pretty much arbitrary with the CoMEM implementation.
  * We have the whole address space to ourselves.
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 52af5ed9f60b..b897fae1f0ca 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -52,25 +52,6 @@ extern unsigned long ia64_max_iommu_merge_mask;
52 52
53#include <asm-generic/pci-dma-compat.h> 53#include <asm-generic/pci-dma-compat.h>
54 54
55#ifdef CONFIG_PCI
56static inline void pci_dma_burst_advice(struct pci_dev *pdev,
57 enum pci_dma_burst_strategy *strat,
58 unsigned long *strategy_parameter)
59{
60 unsigned long cacheline_size;
61 u8 byte;
62
63 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
64 if (byte == 0)
65 cacheline_size = 1024;
66 else
67 cacheline_size = (int) byte * 4;
68
69 *strat = PCI_DMA_BURST_MULTIPLE;
70 *strategy_parameter = cacheline_size;
71}
72#endif
73
74#define HAVE_PCI_MMAP 55#define HAVE_PCI_MMAP
75extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, 56extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
76 enum pci_mmap_state mmap_state, int write_combine); 57 enum pci_mmap_state mmap_state, int write_combine);
@@ -108,19 +89,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
108 return (pci_domain_nr(bus) != 0); 89 return (pci_domain_nr(bus) != 0);
109} 90}
110 91
111static inline struct resource *
112pcibios_select_root(struct pci_dev *pdev, struct resource *res)
113{
114 struct resource *root = NULL;
115
116 if (res->flags & IORESOURCE_IO)
117 root = &ioport_resource;
118 if (res->flags & IORESOURCE_MEM)
119 root = &iomem_resource;
120
121 return root;
122}
123
124#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ 92#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
125static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 93static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
126{ 94{
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 468aca8cec0d..fdf2e75d7033 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -44,16 +44,6 @@ struct pci_dev;
44 */ 44 */
45#define pcibios_assign_all_busses() 0 45#define pcibios_assign_all_busses() 0
46 46
47#ifdef CONFIG_PCI
48static inline void pci_dma_burst_advice(struct pci_dev *pdev,
49 enum pci_dma_burst_strategy *strat,
50 unsigned long *strategy_parameter)
51{
52 *strat = PCI_DMA_BURST_INFINITY;
53 *strategy_parameter = ~0UL;
54}
55#endif
56
57extern int pci_domain_nr(struct pci_bus *bus); 47extern int pci_domain_nr(struct pci_bus *bus);
58 48
59/* Decide whether to display the domain number in /proc */ 49/* Decide whether to display the domain number in /proc */
@@ -83,19 +73,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
83 */ 73 */
84#define PCI_DMA_BUS_IS_PHYS (1) 74#define PCI_DMA_BUS_IS_PHYS (1)
85 75
86static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
87 struct resource *res)
88{
89 struct resource *root = NULL;
90
91 if (res->flags & IORESOURCE_IO)
92 root = &ioport_resource;
93 if (res->flags & IORESOURCE_MEM)
94 root = &iomem_resource;
95
96 return root;
97}
98
99extern void pcibios_claim_one_bus(struct pci_bus *b); 76extern void pcibios_claim_one_bus(struct pci_bus *b);
100 77
101extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); 78extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index d9692993fc83..70dcc5498128 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -113,16 +113,6 @@ struct pci_dev;
  */
 extern unsigned int PCI_DMA_BUS_IS_PHYS;
 
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
-#endif
-
 #ifdef CONFIG_PCI_DOMAINS
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
index a138e8ee5cfc..b3ab59318d91 100644
--- a/arch/mips/pci/fixup-cobalt.c
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <asm/pci.h>
 #include <asm/io.h>
 #include <asm/gt64120.h>
 
diff --git a/arch/mips/pci/ops-mace.c b/arch/mips/pci/ops-mace.c
index 6b5821febc38..951d8070fb48 100644
--- a/arch/mips/pci/ops-mace.c
+++ b/arch/mips/pci/ops-mace.c
@@ -8,7 +8,6 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/types.h>
-#include <asm/pci.h>
 #include <asm/ip32/mace.h>
 
 #if 0
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 8b117e638306..c5347d99cf3a 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -20,7 +20,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 
-#include <asm/pci.h>
 #include <asm/gpio.h>
 #include <asm/addrspace.h>
 
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 5f70af25c7d0..c222d1792d5b 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -83,19 +83,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
83/* implement the pci_ DMA API in terms of the generic device dma_ one */ 83/* implement the pci_ DMA API in terms of the generic device dma_ one */
84#include <asm-generic/pci-dma-compat.h> 84#include <asm-generic/pci-dma-compat.h>
85 85
86static inline struct resource *
87pcibios_select_root(struct pci_dev *pdev, struct resource *res)
88{
89 struct resource *root = NULL;
90
91 if (res->flags & IORESOURCE_IO)
92 root = &ioport_resource;
93 if (res->flags & IORESOURCE_MEM)
94 root = &iomem_resource;
95
96 return root;
97}
98
99static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 86static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
100{ 87{
101 return channel ? 15 : 14; 88 return channel ? 15 : 14;
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index 20df2b04fc09..bf5e044281d6 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -196,25 +196,6 @@ static inline void pcibios_register_hba(struct pci_hba_data *x)
196/* export the pci_ DMA API in terms of the dma_ one */ 196/* export the pci_ DMA API in terms of the dma_ one */
197#include <asm-generic/pci-dma-compat.h> 197#include <asm-generic/pci-dma-compat.h>
198 198
199#ifdef CONFIG_PCI
200static inline void pci_dma_burst_advice(struct pci_dev *pdev,
201 enum pci_dma_burst_strategy *strat,
202 unsigned long *strategy_parameter)
203{
204 unsigned long cacheline_size;
205 u8 byte;
206
207 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
208 if (byte == 0)
209 cacheline_size = 1024;
210 else
211 cacheline_size = (int) byte * 4;
212
213 *strat = PCI_DMA_BURST_MULTIPLE;
214 *strategy_parameter = cacheline_size;
215}
216#endif
217
218static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 199static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
219{ 200{
220 return channel ? 15 : 14; 201 return channel ? 15 : 14;
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 4aef8d660999..99dc432b256a 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -71,36 +71,6 @@ extern struct dma_map_ops *get_pci_dma_ops(void);
71 */ 71 */
72#define PCI_DISABLE_MWI 72#define PCI_DISABLE_MWI
73 73
74#ifdef CONFIG_PCI
75static inline void pci_dma_burst_advice(struct pci_dev *pdev,
76 enum pci_dma_burst_strategy *strat,
77 unsigned long *strategy_parameter)
78{
79 unsigned long cacheline_size;
80 u8 byte;
81
82 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
83 if (byte == 0)
84 cacheline_size = 1024;
85 else
86 cacheline_size = (int) byte * 4;
87
88 *strat = PCI_DMA_BURST_MULTIPLE;
89 *strategy_parameter = cacheline_size;
90}
91#endif
92
93#else /* 32-bit */
94
95#ifdef CONFIG_PCI
96static inline void pci_dma_burst_advice(struct pci_dev *pdev,
97 enum pci_dma_burst_strategy *strat,
98 unsigned long *strategy_parameter)
99{
100 *strat = PCI_DMA_BURST_INFINITY;
101 *strategy_parameter = ~0UL;
102}
103#endif
104#endif /* CONFIG_PPC64 */ 74#endif /* CONFIG_PPC64 */
105 75
106extern int pci_domain_nr(struct pci_bus *bus); 76extern int pci_domain_nr(struct pci_bus *bus);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ea2cea7eaef1..50a508714f87 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -46,7 +46,6 @@
 #include <asm/mmu.h>
 #include <asm/paca.h>
 #include <asm/pgtable.h>
-#include <asm/pci.h>
 #include <asm/iommu.h>
 #include <asm/btext.h>
 #include <asm/sections.h>
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index fd1fe4c37599..fcca8077e6a2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -37,7 +37,6 @@
 #include <asm/smp.h>
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
-#include <asm/pci.h>
 #include <asm/iommu.h>
 #include <asm/btext.h>
 #include <asm/sections.h>
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index e2d401ad8fbb..6eb3b2abae90 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -12,7 +12,7 @@
 
 #undef DEBUG
 
-#include <asm/pci.h>
+#include <linux/pci.h>
 #include <asm/mpc52xx.h>
 #include <asm/delay.h>
 #include <asm/machdep.h>
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index d3236c9e226b..39e2f41b6cf0 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -9,10 +9,10 @@
 #include <linux/pfn.h>
 #include <linux/suspend.h>
 #include <linux/mm.h>
+#include <linux/pci.h>
 #include <asm/ctl_reg.h>
 #include <asm/ipl.h>
 #include <asm/cio.h>
-#include <asm/pci.h>
 #include <asm/sections.h>
 #include "entry.h"
 
diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c
index 4ce95a001b80..45361946460f 100644
--- a/arch/sh/drivers/pci/ops-sh5.c
+++ b/arch/sh/drivers/pci/ops-sh5.c
@@ -18,7 +18,6 @@
 #include <linux/delay.h>
 #include <linux/types.h>
 #include <linux/irq.h>
-#include <asm/pci.h>
 #include <asm/io.h>
 #include "pci-sh5.h"
 
diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c
index 16c1e721bf54..8229114c6a58 100644
--- a/arch/sh/drivers/pci/pci-sh5.c
+++ b/arch/sh/drivers/pci/pci-sh5.c
@@ -20,7 +20,6 @@
 #include <linux/types.h>
 #include <linux/irq.h>
 #include <cpu/irq.h>
-#include <asm/pci.h>
 #include <asm/io.h>
 #include "pci-sh5.h"
 
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 5b4511552998..e343dbd02e41 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -86,24 +86,6 @@ extern void pcibios_set_master(struct pci_dev *dev);
86 * direct memory write. 86 * direct memory write.
87 */ 87 */
88#define PCI_DISABLE_MWI 88#define PCI_DISABLE_MWI
89
90static inline void pci_dma_burst_advice(struct pci_dev *pdev,
91 enum pci_dma_burst_strategy *strat,
92 unsigned long *strategy_parameter)
93{
94 unsigned long cacheline_size;
95 u8 byte;
96
97 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
98
99 if (byte == 0)
100 cacheline_size = L1_CACHE_BYTES;
101 else
102 cacheline_size = byte << 2;
103
104 *strat = PCI_DMA_BURST_MULTIPLE;
105 *strategy_parameter = cacheline_size;
106}
107#endif 89#endif
108 90
109/* Board-specific fixup routines. */ 91/* Board-specific fixup routines. */
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 53e9b4987db0..b7c092df3134 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -22,16 +22,6 @@
 
 struct pci_dev;
 
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
-#endif
-
 #endif /* __KERNEL__ */
 
 #ifndef CONFIG_LEON_PCI
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index bd00a6226169..022d16008a00 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -31,25 +31,6 @@
31#define PCI64_REQUIRED_MASK (~(u64)0) 31#define PCI64_REQUIRED_MASK (~(u64)0)
32#define PCI64_ADDR_BASE 0xfffc000000000000UL 32#define PCI64_ADDR_BASE 0xfffc000000000000UL
33 33
34#ifdef CONFIG_PCI
35static inline void pci_dma_burst_advice(struct pci_dev *pdev,
36 enum pci_dma_burst_strategy *strat,
37 unsigned long *strategy_parameter)
38{
39 unsigned long cacheline_size;
40 u8 byte;
41
42 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
43 if (byte == 0)
44 cacheline_size = 1024;
45 else
46 cacheline_size = (int) byte * 4;
47
48 *strat = PCI_DMA_BURST_BOUNDARY;
49 *strategy_parameter = cacheline_size;
50}
51#endif
52
53/* Return the index of the PCI controller for device PDEV. */ 34/* Return the index of the PCI controller for device PDEV. */
54 35
55int pci_domain_nr(struct pci_bus *bus); 36int pci_domain_nr(struct pci_bus *bus);
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h
index 654407e98619..38b3f3785c3c 100644
--- a/arch/unicore32/include/asm/pci.h
+++ b/arch/unicore32/include/asm/pci.h
@@ -18,16 +18,6 @@
 #include <asm-generic/pci.h>
 #include <mach/hardware.h> /* for PCIBIOS_MIN_* */
 
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
-#endif
-
 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	enum pci_mmap_state mmap_state, int write_combine);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index d8c80ff32e8c..b962e0fe5658 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -80,13 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 #ifdef CONFIG_PCI
 extern void early_quirks(void);
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	*strat = PCI_DMA_BURST_INFINITY;
-	*strategy_parameter = ~0UL;
-}
 #else
 static inline void early_quirks(void) { }
 #endif
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 3cee10abf01d..3839628d962e 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -11,7 +11,6 @@
 #include <asm/bios_ebda.h>
 #include <asm/paravirt.h>
 #include <asm/pci_x86.h>
-#include <asm/pci.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
 #include <asm/apic.h>
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 14a63ed6fe09..ff9911707160 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -81,6 +81,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
 		},
 	},
+	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
+	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
+	{
+		.callback = set_use_crs,
+		.ident = "Foxconn K8M890-8237A",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
+			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+		},
+	},
 
 	/* Now for the blacklist.. */
 
@@ -121,8 +132,10 @@ void __init pci_acpi_crs_quirks(void)
 {
 	int year;
 
-	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
-		pci_use_crs = false;
+	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
+		if (iomem_resource.end <= 0xffffffff)
+			pci_use_crs = false;
+	}
 
 	dmi_check_system(pci_crs_quirks);
 
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b1def411c0b8..4db10b189104 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -163,7 +163,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
 {
 	int segment = pci_domain_nr(dev->bus);
 	int bus = dev->bus->number;
-	int device = PCI_SLOT(dev->devfn);
+	int device = pci_ari_enabled(dev->bus) ? 0 : PCI_SLOT(dev->devfn);
 	struct acpi_prt_entry *entry;
 
 	if (((prt->address >> 16) & 0xffff) != device ||
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 3dc1f68b322d..6ce973187225 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3058,7 +3058,6 @@ static void cas_init_mac(struct cas *cp)
 	/* setup core arbitration weight register */
 	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
 
-	/* XXX Use pci_dma_burst_advice() */
 #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
 	/* set the infinite burst register for chips that don't have
 	 * pci issues.
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 15f9b7c9e4d3..3f6738612f45 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -1313,8 +1313,6 @@ static int ntb_setup_intx(struct ntb_device *ndev)
 	struct pci_dev *pdev = ndev->pdev;
 	int rc;
 
-	pci_msi_off(pdev);
-
 	/* Verify intx is enabled */
 	pci_intx(pdev, 1);
 
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 78a7dcbec7d8..6906a3f61bd8 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -765,7 +765,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
 	spin_lock(&io_range_lock);
 	list_for_each_entry(res, &io_range_list, list) {
 		if (address >= res->start && address < res->start + res->size) {
-			addr = res->start - address + offset;
+			addr = address - res->start + offset;
 			break;
 		}
 		offset += res->size;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7a8f1c5e65af..73de4efcbe6e 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -1,6 +1,10 @@
 #
 # PCI configuration
 #
+config PCI_BUS_ADDR_T_64BIT
+	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+	depends on PCI
+
 config PCI_MSI
 	bool "Message Signaled Interrupts (MSI and MSI-X)"
 	depends on PCI
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 90fa3a78fb7c..6fbd3f2b5992 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -92,11 +92,11 @@ void pci_bus_remove_resources(struct pci_bus *bus)
 }
 
 static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
 static struct pci_bus_region pci_64_bit = {0,
-				(dma_addr_t) 0xffffffffffffffffULL};
-static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL,
-				(dma_addr_t) 0xffffffffffffffffULL};
+				(pci_bus_addr_t) 0xffffffffffffffffULL};
+static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
+				(pci_bus_addr_t) 0xffffffffffffffffULL};
 #endif
 
 /*
@@ -200,7 +200,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 			  resource_size_t),
 		void *alignf_data)
 {
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
 	int rc;
 
 	if (res->flags & IORESOURCE_MEM_64) {
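
The pci_32_bit, pci_64_bit, and pci_high regions above now use the new
pci_bus_addr_t instead of dma_addr_t, gated on CONFIG_PCI_BUS_ADDR_T_64BIT
from the drivers/pci/Kconfig hunk earlier. The include/linux/types.h hunk is
not shown in this section; a plausible shape for the type, assuming it simply
widens with that option, would be:

/* Assumed definition; the actual types.h change is not visible above. */
#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif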
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 1dfb567b3522..c132bddc03f3 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -89,11 +89,20 @@ config PCI_XGENE
 	depends on ARCH_XGENE
 	depends on OF
 	select PCIEPORTBUS
+	select PCI_MSI_IRQ_DOMAIN if PCI_MSI
 	help
 	  Say Y here if you want internal PCI support on APM X-Gene SoC.
 	  There are 5 internal PCIe ports available. Each port is GEN3 capable
 	  and have varied lanes from x1 to x8.
 
+config PCI_XGENE_MSI
+	bool "X-Gene v1 PCIe MSI feature"
+	depends on PCI_XGENE && PCI_MSI
+	default y
+	help
+	  Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
+	  This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+
 config PCI_LAYERSCAPE
 	bool "Freescale Layerscape PCIe controller"
 	depends on OF && ARM
@@ -125,4 +134,15 @@ config PCIE_IPROC_PLATFORM
 	  Say Y here if you want to use the Broadcom iProc PCIe controller
 	  through the generic platform bus interface
 
+config PCIE_IPROC_BCMA
+	bool "Broadcom iProc PCIe BCMA bus driver"
+	depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
+	select PCIE_IPROC
+	select BCMA
+	select PCI_DOMAINS
+	default ARCH_BCM_5301X
+	help
+	  Say Y here if you want to use the Broadcom iProc PCIe controller
+	  through the BCMA bus interface
+
 endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index f733b4e27642..140d66f796e4 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -11,7 +11,9 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
 obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
 obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
 obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
 obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
 obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
+obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 2d57e19a2cd4..80db09e47800 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -93,9 +93,9 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
93 93
94static int dra7xx_pcie_establish_link(struct pcie_port *pp) 94static int dra7xx_pcie_establish_link(struct pcie_port *pp)
95{ 95{
96 u32 reg;
97 unsigned int retries = 1000;
98 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); 96 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
97 u32 reg;
98 unsigned int retries;
99 99
100 if (dw_pcie_link_up(pp)) { 100 if (dw_pcie_link_up(pp)) {
101 dev_err(pp->dev, "link is already up\n"); 101 dev_err(pp->dev, "link is already up\n");
@@ -106,19 +106,14 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
106 reg |= LTSSM_EN; 106 reg |= LTSSM_EN;
107 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 107 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
108 108
109 while (retries--) { 109 for (retries = 0; retries < 1000; retries++) {
110 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); 110 if (dw_pcie_link_up(pp))
111 if (reg & LINK_UP) 111 return 0;
112 break;
113 usleep_range(10, 20); 112 usleep_range(10, 20);
114 } 113 }
115 114
116 if (retries == 0) { 115 dev_err(pp->dev, "link is not up\n");
117 dev_err(pp->dev, "link is not up\n"); 116 return -EINVAL;
118 return -ETIMEDOUT;
119 }
120
121 return 0;
122} 117}
123 118
124static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) 119static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index c139237e0e52..f9f468d9a819 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -316,9 +316,9 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp)
316 316
317static int exynos_pcie_establish_link(struct pcie_port *pp) 317static int exynos_pcie_establish_link(struct pcie_port *pp)
318{ 318{
319 u32 val;
320 int count = 0;
321 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); 319 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
320 u32 val;
321 unsigned int retries;
322 322
323 if (dw_pcie_link_up(pp)) { 323 if (dw_pcie_link_up(pp)) {
324 dev_err(pp->dev, "Link already up\n"); 324 dev_err(pp->dev, "Link already up\n");
@@ -357,27 +357,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
357 PCIE_APP_LTSSM_ENABLE); 357 PCIE_APP_LTSSM_ENABLE);
358 358
359 /* check if the link is up or not */ 359 /* check if the link is up or not */
360 while (!dw_pcie_link_up(pp)) { 360 for (retries = 0; retries < 10; retries++) {
361 mdelay(100); 361 if (dw_pcie_link_up(pp)) {
362 count++; 362 dev_info(pp->dev, "Link up\n");
363 if (count == 10) { 363 return 0;
364 while (exynos_phy_readl(exynos_pcie,
365 PCIE_PHY_PLL_LOCKED) == 0) {
366 val = exynos_blk_readl(exynos_pcie,
367 PCIE_PHY_PLL_LOCKED);
368 dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
369 }
370 /* power off phy */
371 exynos_pcie_power_off_phy(pp);
372
373 dev_err(pp->dev, "PCIe Link Fail\n");
374 return -EINVAL;
375 } 364 }
365 mdelay(100);
376 } 366 }
377 367
378 dev_info(pp->dev, "Link up\n"); 368 while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
369 val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
370 dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
371 }
372 /* power off phy */
373 exynos_pcie_power_off_phy(pp);
379 374
380 return 0; 375 dev_err(pp->dev, "PCIe Link Fail\n");
376 return -EINVAL;
381} 377}
382 378
383static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) 379static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index fdb95367721e..233a196c6e66 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -47,6 +47,8 @@ struct imx6_pcie {
47#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 47#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
48#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf 48#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
49 49
50#define PCIE_RC_LCSR 0x80
51
50/* PCIe Port Logic registers (memory-mapped) */ 52/* PCIe Port Logic registers (memory-mapped) */
51#define PL_OFFSET 0x700 53#define PL_OFFSET 0x700
52#define PCIE_PL_PFLR (PL_OFFSET + 0x08) 54#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
@@ -335,21 +337,36 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
335 337
336static int imx6_pcie_wait_for_link(struct pcie_port *pp) 338static int imx6_pcie_wait_for_link(struct pcie_port *pp)
337{ 339{
338 int count = 200; 340 unsigned int retries;
339 341
340 while (!dw_pcie_link_up(pp)) { 342 for (retries = 0; retries < 200; retries++) {
343 if (dw_pcie_link_up(pp))
344 return 0;
341 usleep_range(100, 1000); 345 usleep_range(100, 1000);
342 if (--count)
343 continue;
344
345 dev_err(pp->dev, "phy link never came up\n");
346 dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
347 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
348 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
349 return -EINVAL;
350 } 346 }
351 347
352 return 0; 348 dev_err(pp->dev, "phy link never came up\n");
349 dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
350 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
351 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
352 return -EINVAL;
353}
354
355static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
356{
357 u32 tmp;
358 unsigned int retries;
359
360 for (retries = 0; retries < 200; retries++) {
361 tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
362 /* Test if the speed change finished. */
363 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
364 return 0;
365 usleep_range(100, 1000);
366 }
367
368 dev_err(pp->dev, "Speed change timeout\n");
369 return -EINVAL;
353} 370}
354 371
355static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) 372static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
@@ -359,11 +376,11 @@ static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
359 return dw_handle_msi_irq(pp); 376 return dw_handle_msi_irq(pp);
360} 377}
361 378
362static int imx6_pcie_start_link(struct pcie_port *pp) 379static int imx6_pcie_establish_link(struct pcie_port *pp)
363{ 380{
364 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); 381 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
365 uint32_t tmp; 382 u32 tmp;
366 int ret, count; 383 int ret;
367 384
368 /* 385 /*
369 * Force Gen1 operation when starting the link. In case the link is 386 * Force Gen1 operation when starting the link. In case the link is
@@ -397,29 +414,22 @@ static int imx6_pcie_start_link(struct pcie_port *pp)
397 tmp |= PORT_LOGIC_SPEED_CHANGE; 414 tmp |= PORT_LOGIC_SPEED_CHANGE;
398 writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); 415 writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
399 416
400 count = 200; 417 ret = imx6_pcie_wait_for_speed_change(pp);
401 while (count--) { 418 if (ret) {
402 tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); 419 dev_err(pp->dev, "Failed to bring link up!\n");
403 /* Test if the speed change finished. */ 420 return ret;
404 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
405 break;
406 usleep_range(100, 1000);
407 } 421 }
408 422
409 /* Make sure link training is finished as well! */ 423 /* Make sure link training is finished as well! */
410 if (count) 424 ret = imx6_pcie_wait_for_link(pp);
411 ret = imx6_pcie_wait_for_link(pp);
412 else
413 ret = -EINVAL;
414
415 if (ret) { 425 if (ret) {
416 dev_err(pp->dev, "Failed to bring link up!\n"); 426 dev_err(pp->dev, "Failed to bring link up!\n");
417 } else { 427 return ret;
418 tmp = readl(pp->dbi_base + 0x80);
419 dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
420 } 428 }
421 429
422 return ret; 430 tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
431 dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
432 return 0;
423} 433}
424 434
425static void imx6_pcie_host_init(struct pcie_port *pp) 435static void imx6_pcie_host_init(struct pcie_port *pp)
@@ -432,7 +442,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
432 442
433 dw_pcie_setup_rc(pp); 443 dw_pcie_setup_rc(pp);
434 444
435 imx6_pcie_start_link(pp); 445 imx6_pcie_establish_link(pp);
436 446
437 if (IS_ENABLED(CONFIG_PCI_MSI)) 447 if (IS_ENABLED(CONFIG_PCI_MSI))
438 dw_pcie_msi_init(pp); 448 dw_pcie_msi_init(pp);
@@ -440,19 +450,19 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
440 450
441static void imx6_pcie_reset_phy(struct pcie_port *pp) 451static void imx6_pcie_reset_phy(struct pcie_port *pp)
442{ 452{
443 uint32_t temp; 453 u32 tmp;
444 454
445 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); 455 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
446 temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | 456 tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
447 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 457 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
448 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); 458 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
449 459
450 usleep_range(2000, 3000); 460 usleep_range(2000, 3000);
451 461
452 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); 462 pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
453 temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | 463 tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
454 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 464 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
455 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); 465 pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
456} 466}
457 467
458static int imx6_pcie_link_up(struct pcie_port *pp) 468static int imx6_pcie_link_up(struct pcie_port *pp)
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 75333b0c4f0a..b75d684aefcd 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -88,7 +88,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
88static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) 88static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
89{ 89{
90 struct pcie_port *pp = &ks_pcie->pp; 90 struct pcie_port *pp = &ks_pcie->pp;
91 int count = 200; 91 unsigned int retries;
92 92
93 dw_pcie_setup_rc(pp); 93 dw_pcie_setup_rc(pp);
94 94
@@ -99,17 +99,15 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
99 99
100 ks_dw_pcie_initiate_link_train(ks_pcie); 100 ks_dw_pcie_initiate_link_train(ks_pcie);
101 /* check if the link is up or not */ 101 /* check if the link is up or not */
102 while (!dw_pcie_link_up(pp)) { 102 for (retries = 0; retries < 200; retries++) {
103 if (dw_pcie_link_up(pp))
104 return 0;
103 usleep_range(100, 1000); 105 usleep_range(100, 1000);
104 if (--count) { 106 ks_dw_pcie_initiate_link_train(ks_pcie);
105 ks_dw_pcie_initiate_link_train(ks_pcie);
106 continue;
107 }
108 dev_err(pp->dev, "phy link never came up\n");
109 return -EINVAL;
110 } 107 }
111 108
112 return 0; 109 dev_err(pp->dev, "phy link never came up\n");
110 return -EINVAL;
113} 111}
114 112
115static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) 113static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 4a6e62f67579..b2328ea13dcf 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -62,22 +62,27 @@ static int ls_pcie_link_up(struct pcie_port *pp)
62 return 1; 62 return 1;
63} 63}
64 64
65static int ls_pcie_establish_link(struct pcie_port *pp)
66{
67 unsigned int retries;
68
69 for (retries = 0; retries < 200; retries++) {
70 if (dw_pcie_link_up(pp))
71 return 0;
72 usleep_range(100, 1000);
73 }
74
75 dev_err(pp->dev, "phy link never came up\n");
76 return -EINVAL;
77}
78
65static void ls_pcie_host_init(struct pcie_port *pp) 79static void ls_pcie_host_init(struct pcie_port *pp)
66{ 80{
67 struct ls_pcie *pcie = to_ls_pcie(pp); 81 struct ls_pcie *pcie = to_ls_pcie(pp);
68 int count = 0;
69 u32 val; 82 u32 val;
70 83
71 dw_pcie_setup_rc(pp); 84 dw_pcie_setup_rc(pp);
72 85 ls_pcie_establish_link(pp);
73 while (!ls_pcie_link_up(pp)) {
74 usleep_range(100, 1000);
75 count++;
76 if (count >= 200) {
77 dev_err(pp->dev, "phy link never came up\n");
78 return;
79 }
80 }
81 86
82 /* 87 /*
83 * LS1021A Workaround for internal TKT228622 88 * LS1021A Workaround for internal TKT228622
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 1ab863551920..70aa09556ec5 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -751,21 +751,6 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
751 return 1; 751 return 1;
752} 752}
753 753
754static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
755{
756 struct mvebu_pcie *pcie = sys_to_pcie(sys);
757 struct pci_bus *bus;
758
759 bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr,
760 &mvebu_pcie_ops, sys, &sys->resources);
761 if (!bus)
762 return NULL;
763
764 pci_scan_child_bus(bus);
765
766 return bus;
767}
768
769static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, 754static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
770 const struct resource *res, 755 const struct resource *res,
771 resource_size_t start, 756 resource_size_t start,
@@ -809,12 +794,11 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
809 hw.nr_controllers = 1; 794 hw.nr_controllers = 1;
810 hw.private_data = (void **)&pcie; 795 hw.private_data = (void **)&pcie;
811 hw.setup = mvebu_pcie_setup; 796 hw.setup = mvebu_pcie_setup;
812 hw.scan = mvebu_pcie_scan_bus;
813 hw.map_irq = of_irq_parse_and_map_pci; 797 hw.map_irq = of_irq_parse_and_map_pci;
814 hw.ops = &mvebu_pcie_ops; 798 hw.ops = &mvebu_pcie_ops;
815 hw.align_resource = mvebu_pcie_align_resource; 799 hw.align_resource = mvebu_pcie_align_resource;
816 800
817 pci_common_init(&hw); 801 pci_common_init_dev(&pcie->pdev->dev, &hw);
818} 802}
819 803
820/* 804/*
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 00e92720d7f7..10c05718dbfd 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -630,21 +630,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
630 return irq; 630 return irq;
631} 631}
632 632
633static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
634{
635 struct tegra_pcie *pcie = sys_to_pcie(sys);
636 struct pci_bus *bus;
637
638 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
639 &sys->resources);
640 if (!bus)
641 return NULL;
642
643 pci_scan_child_bus(bus);
644
645 return bus;
646}
647
648static irqreturn_t tegra_pcie_isr(int irq, void *arg) 633static irqreturn_t tegra_pcie_isr(int irq, void *arg)
649{ 634{
650 const char *err_msg[] = { 635 const char *err_msg[] = {
@@ -1831,7 +1816,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
1831 hw.private_data = (void **)&pcie; 1816 hw.private_data = (void **)&pcie;
1832 hw.setup = tegra_pcie_setup; 1817 hw.setup = tegra_pcie_setup;
1833 hw.map_irq = tegra_pcie_map_irq; 1818 hw.map_irq = tegra_pcie_map_irq;
1834 hw.scan = tegra_pcie_scan_bus;
1835 hw.ops = &tegra_pcie_ops; 1819 hw.ops = &tegra_pcie_ops;
1836 1820
1837 pci_common_init_dev(pcie->dev, &hw); 1821 pci_common_init_dev(pcie->dev, &hw);
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
new file mode 100644
index 000000000000..2d31d4d6fd08
--- /dev/null
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -0,0 +1,596 @@
1/*
2 * APM X-Gene MSI Driver
3 *
4 * Copyright (c) 2014, Applied Micro Circuits Corporation
5 * Author: Tanmay Inamdar <tinamdar@apm.com>
6 * Duc Dang <dhdang@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18#include <linux/cpu.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/msi.h>
22#include <linux/of_irq.h>
23#include <linux/irqchip/chained_irq.h>
24#include <linux/pci.h>
25#include <linux/platform_device.h>
26#include <linux/of_pci.h>
27
28#define MSI_IR0 0x000000
29#define MSI_INT0 0x800000
30#define IDX_PER_GROUP 8
31#define IRQS_PER_IDX 16
32#define NR_HW_IRQS 16
33#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
34
35struct xgene_msi_group {
36 struct xgene_msi *msi;
37 int gic_irq;
38 u32 msi_grp;
39};
40
41struct xgene_msi {
42 struct device_node *node;
43 struct msi_controller mchip;
44 struct irq_domain *domain;
45 u64 msi_addr;
46 void __iomem *msi_regs;
47 unsigned long *bitmap;
48 struct mutex bitmap_lock;
49 struct xgene_msi_group *msi_groups;
50 int num_cpus;
51};
52
53/* Global data */
54static struct xgene_msi xgene_msi_ctrl;
55
56static struct irq_chip xgene_msi_top_irq_chip = {
57 .name = "X-Gene1 MSI",
58 .irq_enable = pci_msi_unmask_irq,
59 .irq_disable = pci_msi_mask_irq,
60 .irq_mask = pci_msi_mask_irq,
61 .irq_unmask = pci_msi_unmask_irq,
62};
63
64static struct msi_domain_info xgene_msi_domain_info = {
65 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
66 MSI_FLAG_PCI_MSIX),
67 .chip = &xgene_msi_top_irq_chip,
68};
69
70/*
71 * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
72 * n is the group number (0..F) and x is the register index within a group (0..7).
73 * The register layout is as follows:
74 * MSI0IR0 base_addr
75 * MSI0IR1 base_addr + 0x10000
76 * ... ...
77 * MSI0IR6 base_addr + 0x60000
78 * MSI0IR7 base_addr + 0x70000
79 * MSI1IR0 base_addr + 0x80000
80 * MSI1IR1 base_addr + 0x90000
81 * ... ...
82 * MSI1IR7 base_addr + 0xF0000
83 * MSI2IR0 base_addr + 0x100000
84 * ... ...
85 * MSIFIR0 base_addr + 0x780000
86 * MSIFIR1 base_addr + 0x790000
87 * ... ...
88 * MSIFIR7 base_addr + 0x7F0000
89 * MSIINT0 base_addr + 0x800000
90 * MSIINT1 base_addr + 0x810000
91 * ... ...
92 * MSIINTF base_addr + 0x8F0000
93 *
94 * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
95 * There are a total of 16 GIC IRQs assigned to these 16 groups of MSI termination
96 * registers.
97 *
98 * Each MSI termination group has one MSIINTn register (n is 0..15) to indicate
99 * the MSI pending status caused by one of its 8 index registers.
100 */
101
102/* MSInIRx read helper */
103static u32 xgene_msi_ir_read(struct xgene_msi *msi,
104 u32 msi_grp, u32 msir_idx)
105{
106 return readl_relaxed(msi->msi_regs + MSI_IR0 +
107 (msi_grp << 19) + (msir_idx << 16));
108}
109
110/* MSIINTn read helper */
111static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
112{
113 return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
114}
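
The two helpers above encode the layout documented in the preceding comment: MSInIRx sits at base + (n << 19) + (x << 16) and MSIINTn at base + 0x800000 + (n << 16). A stand-alone sketch (plain address arithmetic only, no MMIO) that reproduces a few of the offsets from that table:

#include <stdint.h>
#include <stdio.h>

#define MSI_IR0  0x000000
#define MSI_INT0 0x800000

/* Offset of MSInIRx: group n selects bits 19+, index x selects bits 16+. */
static uint32_t msi_ir_offset(uint32_t grp, uint32_t idx)
{
	return MSI_IR0 + (grp << 19) + (idx << 16);
}

/* Offset of MSIINTn for group n. */
static uint32_t msi_int_offset(uint32_t grp)
{
	return MSI_INT0 + (grp << 16);
}

int main(void)
{
	/* Matches the table in the comment: MSI0IR1 = 0x10000,
	 * MSI1IR0 = 0x80000, MSIFIR7 = 0x7F0000, MSIINTF = 0x8F0000. */
	printf("MSI0IR1 = 0x%06x\n", msi_ir_offset(0, 1));
	printf("MSI1IR0 = 0x%06x\n", msi_ir_offset(1, 0));
	printf("MSIFIR7 = 0x%06x\n", msi_ir_offset(0xF, 7));
	printf("MSIINTF = 0x%06x\n", msi_int_offset(0xF));
	return 0;
}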
115
116/*
117 * With 2048 MSI vectors supported, the MSI message can be constructed using
118 * the following scheme:
119 * - Divide into 8 256-vector groups
120 * Group 0: 0-255
121 * Group 1: 256-511
122 * Group 2: 512-767
123 * ...
124 * Group 7: 1792-2047
125 * - Each 256-vector group is divided into 16 16-vector groups
126 * As an example: the 16 16-vector groups for 256-vector group 0-255 are
127 * Group 0: 0-15
128 * Group 1: 16-31
129 * ...
130 * Group 15: 240-255
131 * - The termination address of MSI vector in 256-vector group n and 16-vector
132 * group x is the address of MSIxIRn
133 * - The data for MSI vector in 16-vector group x is x
134 */
135static u32 hwirq_to_reg_set(unsigned long hwirq)
136{
137 return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
138}
139
140static u32 hwirq_to_group(unsigned long hwirq)
141{
142 return (hwirq % NR_HW_IRQS);
143}
144
145static u32 hwirq_to_msi_data(unsigned long hwirq)
146{
147 return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
148}
149
150static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
151{
152 struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
153 u32 reg_set = hwirq_to_reg_set(data->hwirq);
154 u32 group = hwirq_to_group(data->hwirq);
155 u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
156
157 msg->address_hi = upper_32_bits(target_addr);
158 msg->address_lo = lower_32_bits(target_addr);
159 msg->data = hwirq_to_msi_data(data->hwirq);
160}
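
A stand-alone sketch of the hwirq-to-message mapping performed by the three helpers and xgene_compose_msi_msg() above; the termination base address is made up for the example:

#include <stdint.h>
#include <stdio.h>

#define IDX_PER_GROUP 8
#define IRQS_PER_IDX  16
#define NR_HW_IRQS    16

/* reg_set selects one of the 8 MSInIRx index registers within a group. */
static unsigned int hwirq_to_reg_set(unsigned long hwirq)
{
	return hwirq / (NR_HW_IRQS * IRQS_PER_IDX);
}

/* group selects one of the 16 MSI termination groups (and GIC IRQs). */
static unsigned int hwirq_to_group(unsigned long hwirq)
{
	return hwirq % NR_HW_IRQS;
}

/* data is the 0..15 payload the endpoint writes to the index register. */
static unsigned int hwirq_to_msi_data(unsigned long hwirq)
{
	return (hwirq / NR_HW_IRQS) % IRQS_PER_IDX;
}

int main(void)
{
	/* Hypothetical termination base address, for illustration only. */
	uint64_t msi_addr = 0x79000000ULL;
	unsigned long hwirq = 1000;	/* any value in 0..2047 */
	unsigned int reg_set = hwirq_to_reg_set(hwirq);
	unsigned int group = hwirq_to_group(hwirq);
	uint64_t addr = msi_addr + (((8 * group) + reg_set) << 16);

	printf("hwirq %lu -> group %u, reg_set %u, data %u, addr 0x%llx\n",
	       hwirq, group, reg_set, hwirq_to_msi_data(hwirq),
	       (unsigned long long)addr);
	return 0;
}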
161
162/*
163 * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
164 * the expected behaviour of .set_affinity for each MSI interrupt, the 16
165 * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
166 * for each core). The MSI vector is moved from one MSI GIC IRQ to another
167 * MSI GIC IRQ to steer its MSI interrupt to the correct X-Gene v1 core. As a
168 * consequence, the total MSI vectors that X-Gene v1 supports will be
169 * reduced to 256 (2048/8) vectors.
170 */
171static int hwirq_to_cpu(unsigned long hwirq)
172{
173 return (hwirq % xgene_msi_ctrl.num_cpus);
174}
175
176static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
177{
178 return (hwirq - hwirq_to_cpu(hwirq));
179}
180
181static int xgene_msi_set_affinity(struct irq_data *irqdata,
182 const struct cpumask *mask, bool force)
183{
184 int target_cpu = cpumask_first(mask);
185 int curr_cpu;
186
187 curr_cpu = hwirq_to_cpu(irqdata->hwirq);
188 if (curr_cpu == target_cpu)
189 return IRQ_SET_MASK_OK_DONE;
190
191 /* Update MSI number to target the new CPU */
192 irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
193
194 return IRQ_SET_MASK_OK;
195}
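
A small sketch of the CPU-steering arithmetic behind xgene_msi_set_affinity(), assuming the 8-core configuration described in the comment above:

#include <stdio.h>

#define NUM_CPUS 8	/* 8-core X-Gene v1, as assumed in the comment above */

static int hwirq_to_cpu(unsigned long hwirq)
{
	return hwirq % NUM_CPUS;
}

static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return hwirq - hwirq_to_cpu(hwirq);
}

int main(void)
{
	unsigned long hwirq = 42;	/* canonical hwirq 40, currently on CPU 2 */
	int target_cpu = 5;

	/* What xgene_msi_set_affinity() does: keep the canonical hwirq and
	 * re-encode the target CPU in the low bits. */
	unsigned long moved = hwirq_to_canonical_hwirq(hwirq) + target_cpu;

	printf("hwirq %lu on cpu %d -> hwirq %lu on cpu %d\n",
	       hwirq, hwirq_to_cpu(hwirq), moved, hwirq_to_cpu(moved));
	return 0;
}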
196
197static struct irq_chip xgene_msi_bottom_irq_chip = {
198 .name = "MSI",
199 .irq_set_affinity = xgene_msi_set_affinity,
200 .irq_compose_msi_msg = xgene_compose_msi_msg,
201};
202
203static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
204 unsigned int nr_irqs, void *args)
205{
206 struct xgene_msi *msi = domain->host_data;
207 int msi_irq;
208
209 mutex_lock(&msi->bitmap_lock);
210
211 msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
212 msi->num_cpus, 0);
213 if (msi_irq < NR_MSI_VEC)
214 bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
215 else
216 msi_irq = -ENOSPC;
217
218 mutex_unlock(&msi->bitmap_lock);
219
220 if (msi_irq < 0)
221 return msi_irq;
222
223 irq_domain_set_info(domain, virq, msi_irq,
224 &xgene_msi_bottom_irq_chip, domain->host_data,
225 handle_simple_irq, NULL, NULL);
226 set_irq_flags(virq, IRQF_VALID);
227
228 return 0;
229}
230
231static void xgene_irq_domain_free(struct irq_domain *domain,
232 unsigned int virq, unsigned int nr_irqs)
233{
234 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
235 struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
236 u32 hwirq;
237
238 mutex_lock(&msi->bitmap_lock);
239
240 hwirq = hwirq_to_canonical_hwirq(d->hwirq);
241 bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
242
243 mutex_unlock(&msi->bitmap_lock);
244
245 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
246}
247
248static const struct irq_domain_ops msi_domain_ops = {
249 .alloc = xgene_irq_domain_alloc,
250 .free = xgene_irq_domain_free,
251};
252
253static int xgene_allocate_domains(struct xgene_msi *msi)
254{
255 msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
256 &msi_domain_ops, msi);
257 if (!msi->domain)
258 return -ENOMEM;
259
260 msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
261 &xgene_msi_domain_info,
262 msi->domain);
263
264 if (!msi->mchip.domain) {
265 irq_domain_remove(msi->domain);
266 return -ENOMEM;
267 }
268
269 return 0;
270}
271
272static void xgene_free_domains(struct xgene_msi *msi)
273{
274 if (msi->mchip.domain)
275 irq_domain_remove(msi->mchip.domain);
276 if (msi->domain)
277 irq_domain_remove(msi->domain);
278}
279
280static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
281{
282 int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
283
284 xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
285 if (!xgene_msi->bitmap)
286 return -ENOMEM;
287
288 mutex_init(&xgene_msi->bitmap_lock);
289
290 xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
291 sizeof(struct xgene_msi_group),
292 GFP_KERNEL);
293 if (!xgene_msi->msi_groups)
294 return -ENOMEM;
295
296 return 0;
297}
298
299static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
300{
301 struct irq_chip *chip = irq_desc_get_chip(desc);
302 struct xgene_msi_group *msi_groups;
303 struct xgene_msi *xgene_msi;
304 unsigned int virq;
305 int msir_index, msir_val, hw_irq;
306 u32 intr_index, grp_select, msi_grp;
307
308 chained_irq_enter(chip, desc);
309
310 msi_groups = irq_desc_get_handler_data(desc);
311 xgene_msi = msi_groups->msi;
312 msi_grp = msi_groups->msi_grp;
313
314 /*
	 315	 * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt.
	 316	 * If bit x of this register is set (x is 0..7), one or more interrupts
	 317	 * corresponding to MSInIRx are pending.
318 */
319 grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
320 while (grp_select) {
321 msir_index = ffs(grp_select) - 1;
322 /*
323 * Calculate MSInIRx address to read to check for interrupts
324 * (refer to termination address and data assignment
	 325	 * described in xgene_compose_msi_msg())
326 */
327 msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
328 while (msir_val) {
329 intr_index = ffs(msir_val) - 1;
330 /*
331 * Calculate MSI vector number (refer to the termination
332 * address and data assignment described in
333 * xgene_compose_msi_msg function)
334 */
335 hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
336 NR_HW_IRQS) + msi_grp;
337 /*
338 * As we have multiple hw_irq that maps to single MSI,
339 * always look up the virq using the hw_irq as seen from
340 * CPU0
341 */
342 hw_irq = hwirq_to_canonical_hwirq(hw_irq);
343 virq = irq_find_mapping(xgene_msi->domain, hw_irq);
344 WARN_ON(!virq);
345 if (virq != 0)
346 generic_handle_irq(virq);
347 msir_val &= ~(1 << intr_index);
348 }
349 grp_select &= ~(1 << msir_index);
350
351 if (!grp_select) {
352 /*
	 353	 * We handled all interrupts that happened in this group;
	 354	 * resample this group's MSIINTn register in case
	 355	 * something else became pending in the meantime
356 */
357 grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
358 }
359 }
360
361 chained_irq_exit(chip, desc);
362}
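
A host-side sketch of the two-level ffs() scan done by xgene_msi_isr() above, with invented register contents and without the final MSIINTn re-read:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

#define IRQS_PER_IDX 16
#define NR_HW_IRQS   16

int main(void)
{
	/* Simulated register contents for one group (hypothetical values):
	 * MSIINTn says index registers 0 and 3 have something pending,
	 * and each of those has two vector bits set. */
	uint32_t grp_select = 0x09;
	uint32_t msir_val[8] = { 0x0005, 0, 0, 0x8001, 0, 0, 0, 0 };
	uint32_t msi_grp = 2;

	while (grp_select) {
		int msir_index = ffs(grp_select) - 1;
		uint32_t val = msir_val[msir_index];

		while (val) {
			int intr_index = ffs(val) - 1;
			/* Same hwirq reconstruction as the ISR. */
			int hw_irq = ((msir_index * IRQS_PER_IDX) + intr_index)
					* NR_HW_IRQS + msi_grp;

			printf("group %u, index %d, bit %d -> hwirq %d\n",
			       msi_grp, msir_index, intr_index, hw_irq);
			val &= ~(1u << intr_index);
		}
		grp_select &= ~(1u << msir_index);
	}
	return 0;
}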
363
364static int xgene_msi_remove(struct platform_device *pdev)
365{
366 int virq, i;
367 struct xgene_msi *msi = platform_get_drvdata(pdev);
368
369 for (i = 0; i < NR_HW_IRQS; i++) {
370 virq = msi->msi_groups[i].gic_irq;
371 if (virq != 0) {
372 irq_set_chained_handler(virq, NULL);
373 irq_set_handler_data(virq, NULL);
374 }
375 }
376 kfree(msi->msi_groups);
377
378 kfree(msi->bitmap);
379 msi->bitmap = NULL;
380
381 xgene_free_domains(msi);
382
383 return 0;
384}
385
386static int xgene_msi_hwirq_alloc(unsigned int cpu)
387{
388 struct xgene_msi *msi = &xgene_msi_ctrl;
389 struct xgene_msi_group *msi_group;
390 cpumask_var_t mask;
391 int i;
392 int err;
393
394 for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
395 msi_group = &msi->msi_groups[i];
396 if (!msi_group->gic_irq)
397 continue;
398
399 irq_set_chained_handler(msi_group->gic_irq,
400 xgene_msi_isr);
401 err = irq_set_handler_data(msi_group->gic_irq, msi_group);
402 if (err) {
403 pr_err("failed to register GIC IRQ handler\n");
404 return -EINVAL;
405 }
406 /*
407 * Statically allocate MSI GIC IRQs to each CPU core.
408 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
409 * to each core.
410 */
411 if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
412 cpumask_clear(mask);
413 cpumask_set_cpu(cpu, mask);
414 err = irq_set_affinity(msi_group->gic_irq, mask);
415 if (err)
416 pr_err("failed to set affinity for GIC IRQ");
417 free_cpumask_var(mask);
418 } else {
419 pr_err("failed to alloc CPU mask for affinity\n");
420 err = -EINVAL;
421 }
422
423 if (err) {
424 irq_set_chained_handler(msi_group->gic_irq, NULL);
425 irq_set_handler_data(msi_group->gic_irq, NULL);
426 return err;
427 }
428 }
429
430 return 0;
431}
432
433static void xgene_msi_hwirq_free(unsigned int cpu)
434{
435 struct xgene_msi *msi = &xgene_msi_ctrl;
436 struct xgene_msi_group *msi_group;
437 int i;
438
439 for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
440 msi_group = &msi->msi_groups[i];
441 if (!msi_group->gic_irq)
442 continue;
443
444 irq_set_chained_handler(msi_group->gic_irq, NULL);
445 irq_set_handler_data(msi_group->gic_irq, NULL);
446 }
447}
448
449static int xgene_msi_cpu_callback(struct notifier_block *nfb,
450 unsigned long action, void *hcpu)
451{
452 unsigned cpu = (unsigned long)hcpu;
453
454 switch (action) {
455 case CPU_ONLINE:
456 case CPU_ONLINE_FROZEN:
457 xgene_msi_hwirq_alloc(cpu);
458 break;
459 case CPU_DEAD:
460 case CPU_DEAD_FROZEN:
461 xgene_msi_hwirq_free(cpu);
462 break;
463 default:
464 break;
465 }
466
467 return NOTIFY_OK;
468}
469
470static struct notifier_block xgene_msi_cpu_notifier = {
471 .notifier_call = xgene_msi_cpu_callback,
472};
473
474static const struct of_device_id xgene_msi_match_table[] = {
475 {.compatible = "apm,xgene1-msi"},
476 {},
477};
478
479static int xgene_msi_probe(struct platform_device *pdev)
480{
481 struct resource *res;
482 int rc, irq_index;
483 struct xgene_msi *xgene_msi;
484 unsigned int cpu;
485 int virt_msir;
486 u32 msi_val, msi_idx;
487
488 xgene_msi = &xgene_msi_ctrl;
489
490 platform_set_drvdata(pdev, xgene_msi);
491
492 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
493 xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
494 if (IS_ERR(xgene_msi->msi_regs)) {
495 dev_err(&pdev->dev, "no reg space\n");
496 rc = -EINVAL;
497 goto error;
498 }
499 xgene_msi->msi_addr = res->start;
500
501 xgene_msi->num_cpus = num_possible_cpus();
502
503 rc = xgene_msi_init_allocator(xgene_msi);
504 if (rc) {
505 dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
506 goto error;
507 }
508
509 rc = xgene_allocate_domains(xgene_msi);
510 if (rc) {
511 dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
512 goto error;
513 }
514
515 for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
516 virt_msir = platform_get_irq(pdev, irq_index);
517 if (virt_msir < 0) {
518 dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
519 irq_index);
520 rc = -EINVAL;
521 goto error;
522 }
523 xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
524 xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
525 xgene_msi->msi_groups[irq_index].msi = xgene_msi;
526 }
527
528 /*
529 * MSInIRx registers are read-to-clear; before registering
530 * interrupt handlers, read all of them to clear spurious
531 * interrupts that may occur before the driver is probed.
532 */
533 for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
534 for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
535 msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
536 msi_idx);
537 /* Read MSIINTn to confirm */
538 msi_val = xgene_msi_int_read(xgene_msi, irq_index);
539 if (msi_val) {
540 dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
541 rc = -EINVAL;
542 goto error;
543 }
544 }
545
546 cpu_notifier_register_begin();
547
548 for_each_online_cpu(cpu)
549 if (xgene_msi_hwirq_alloc(cpu)) {
550 dev_err(&pdev->dev, "failed to register MSI handlers\n");
551 cpu_notifier_register_done();
552 goto error;
553 }
554
555 rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
556 if (rc) {
557 dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
558 cpu_notifier_register_done();
559 goto error;
560 }
561
562 cpu_notifier_register_done();
563
564 xgene_msi->mchip.of_node = pdev->dev.of_node;
565 rc = of_pci_msi_chip_add(&xgene_msi->mchip);
566 if (rc) {
567 dev_err(&pdev->dev, "failed to add MSI controller chip\n");
568 goto error_notifier;
569 }
570
571 dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
572
573 return 0;
574
575error_notifier:
576 unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
577error:
578 xgene_msi_remove(pdev);
579 return rc;
580}
581
582static struct platform_driver xgene_msi_driver = {
583 .driver = {
584 .name = "xgene-msi",
585 .owner = THIS_MODULE,
586 .of_match_table = xgene_msi_match_table,
587 },
588 .probe = xgene_msi_probe,
589 .remove = xgene_msi_remove,
590};
591
592static int __init xgene_pcie_msi_init(void)
593{
594 return platform_driver_register(&xgene_msi_driver);
595}
596subsys_initcall(xgene_pcie_msi_init);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index ee082c0366ec..a9dfb70d623a 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -59,6 +59,12 @@
59#define SZ_1T (SZ_1G*1024ULL) 59#define SZ_1T (SZ_1G*1024ULL)
60#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) 60#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
61 61
62#define ROOT_CAP_AND_CTRL 0x5C
63
64/* PCIe IP version */
65#define XGENE_PCIE_IP_VER_UNKN 0
66#define XGENE_PCIE_IP_VER_1 1
67
62struct xgene_pcie_port { 68struct xgene_pcie_port {
63 struct device_node *node; 69 struct device_node *node;
64 struct device *dev; 70 struct device *dev;
@@ -67,6 +73,7 @@ struct xgene_pcie_port {
67 void __iomem *cfg_base; 73 void __iomem *cfg_base;
68 unsigned long cfg_addr; 74 unsigned long cfg_addr;
69 bool link_up; 75 bool link_up;
76 u32 version;
70}; 77};
71 78
72static inline u32 pcie_bar_low_val(u32 addr, u32 flags) 79static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
@@ -130,9 +137,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
130static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, 137static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
131 int offset) 138 int offset)
132{ 139{
133 struct xgene_pcie_port *port = bus->sysdata; 140 if ((pci_is_root_bus(bus) && devfn != 0) ||
134
135 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
136 xgene_pcie_hide_rc_bars(bus, offset)) 141 xgene_pcie_hide_rc_bars(bus, offset))
137 return NULL; 142 return NULL;
138 143
@@ -140,9 +145,37 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
140 return xgene_pcie_get_cfg_base(bus) + offset; 145 return xgene_pcie_get_cfg_base(bus) + offset;
141} 146}
142 147
148static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
149 int where, int size, u32 *val)
150{
151 struct xgene_pcie_port *port = bus->sysdata;
152
153 if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
154 PCIBIOS_SUCCESSFUL)
155 return PCIBIOS_DEVICE_NOT_FOUND;
156
157 /*
158 * The v1 controller has a bug in its Configuration Request
159 * Retry Status (CRS) logic: when CRS is enabled and we read the
160 * Vendor and Device ID of a non-existent device, the controller
161 * fabricates return data of 0xFFFF0001 ("device exists but is not
162 * ready") instead of 0xFFFFFFFF ("device does not exist"). This
163 * causes the PCI core to retry the read until it times out.
164 * Avoid this by not claiming to support CRS.
165 */
166 if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
167 ((where & ~0x3) == ROOT_CAP_AND_CTRL))
168 *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
169
170 if (size <= 2)
171 *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
172
173 return PCIBIOS_SUCCESSFUL;
174}
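
A stand-alone sketch of the masking and sub-word extraction done by xgene_pcie_config_read32() above; the dword value is invented, and PCI_EXP_RTCAP_CRSVIS is bit 0 of the Root Capabilities half of the 0x5C dword:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_RTCAP_CRSVIS 0x0001	/* CRS Software Visibility bit */

/* Extract a 1- or 2-byte sub-word from an aligned 32-bit config read,
 * exactly as the driver does for size <= 2. */
static uint32_t extract(uint32_t val, int where, int size)
{
	return (val >> (8 * (where & 3))) & ((1u << (size * 8)) - 1);
}

int main(void)
{
	/* Hypothetical dword at offset 0x5C: Root Control in the low half,
	 * Root Capabilities (with CRSVIS set) in the high half. */
	uint32_t val = 0x00010008;

	/* Hide CRS software visibility so the PCI core never enables CRS. */
	val &= ~((uint32_t)PCI_EXP_RTCAP_CRSVIS << 16);

	printf("Root Capabilities (offset 0x5E, 2 bytes): 0x%04x\n",
	       extract(val, 0x5E, 2));
	return 0;
}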
175
143static struct pci_ops xgene_pcie_ops = { 176static struct pci_ops xgene_pcie_ops = {
144 .map_bus = xgene_pcie_map_bus, 177 .map_bus = xgene_pcie_map_bus,
145 .read = pci_generic_config_read32, 178 .read = xgene_pcie_config_read32,
146 .write = pci_generic_config_write32, 179 .write = pci_generic_config_write32,
147}; 180};
148 181
@@ -468,6 +501,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
468 return 0; 501 return 0;
469} 502}
470 503
504static int xgene_pcie_msi_enable(struct pci_bus *bus)
505{
506 struct device_node *msi_node;
507
508 msi_node = of_parse_phandle(bus->dev.of_node,
509 "msi-parent", 0);
510 if (!msi_node)
511 return -ENODEV;
512
513 bus->msi = of_pci_find_msi_chip_by_node(msi_node);
514 if (!bus->msi)
515 return -ENODEV;
516
517 bus->msi->dev = &bus->dev;
518 return 0;
519}
520
471static int xgene_pcie_probe_bridge(struct platform_device *pdev) 521static int xgene_pcie_probe_bridge(struct platform_device *pdev)
472{ 522{
473 struct device_node *dn = pdev->dev.of_node; 523 struct device_node *dn = pdev->dev.of_node;
@@ -483,6 +533,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
483 port->node = of_node_get(pdev->dev.of_node); 533 port->node = of_node_get(pdev->dev.of_node);
484 port->dev = &pdev->dev; 534 port->dev = &pdev->dev;
485 535
536 port->version = XGENE_PCIE_IP_VER_UNKN;
537 if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
538 port->version = XGENE_PCIE_IP_VER_1;
539
486 ret = xgene_pcie_map_reg(port, pdev); 540 ret = xgene_pcie_map_reg(port, pdev);
487 if (ret) 541 if (ret)
488 return ret; 542 return ret;
@@ -504,6 +558,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
504 if (!bus) 558 if (!bus)
505 return -ENOMEM; 559 return -ENOMEM;
506 560
561 if (IS_ENABLED(CONFIG_PCI_MSI))
562 if (xgene_pcie_msi_enable(bus))
563 dev_info(port->dev, "failed to enable MSI\n");
564
507 pci_scan_child_bus(bus); 565 pci_scan_child_bus(bus);
508 pci_assign_unassigned_bus_resources(bus); 566 pci_assign_unassigned_bus_resources(bus);
509 pci_bus_add_devices(bus); 567 pci_bus_add_devices(bus);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 2e9f84fdd9ce..69486be7181e 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -31,6 +31,7 @@
31#define PORT_LINK_MODE_1_LANES (0x1 << 16) 31#define PORT_LINK_MODE_1_LANES (0x1 << 16)
32#define PORT_LINK_MODE_2_LANES (0x3 << 16) 32#define PORT_LINK_MODE_2_LANES (0x3 << 16)
33#define PORT_LINK_MODE_4_LANES (0x7 << 16) 33#define PORT_LINK_MODE_4_LANES (0x7 << 16)
34#define PORT_LINK_MODE_8_LANES (0xf << 16)
34 35
35#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C 36#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
36#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) 37#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
@@ -38,6 +39,7 @@
38#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) 39#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
39#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) 40#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
40#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) 41#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
42#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
41 43
42#define PCIE_MSI_ADDR_LO 0x820 44#define PCIE_MSI_ADDR_LO 0x820
43#define PCIE_MSI_ADDR_HI 0x824 45#define PCIE_MSI_ADDR_HI 0x824
@@ -150,6 +152,21 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
150 return ret; 152 return ret;
151} 153}
152 154
155static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
156 int type, u64 cpu_addr, u64 pci_addr, u32 size)
157{
158 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
159 PCIE_ATU_VIEWPORT);
160 dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
161 dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
162 dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
163 PCIE_ATU_LIMIT);
164 dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
165 dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
166 dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
167 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
168}
169
153static struct irq_chip dw_msi_irq_chip = { 170static struct irq_chip dw_msi_irq_chip = {
154 .name = "PCI-MSI", 171 .name = "PCI-MSI",
155 .irq_enable = pci_msi_unmask_irq, 172 .irq_enable = pci_msi_unmask_irq,
@@ -493,6 +510,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
493 if (pp->ops->host_init) 510 if (pp->ops->host_init)
494 pp->ops->host_init(pp); 511 pp->ops->host_init(pp);
495 512
513 if (!pp->ops->rd_other_conf)
514 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
515 PCIE_ATU_TYPE_MEM, pp->mem_mod_base,
516 pp->mem_bus_addr, pp->mem_size);
517
496 dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); 518 dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
497 519
498 /* program correct class for RC */ 520 /* program correct class for RC */
@@ -515,115 +537,73 @@ int dw_pcie_host_init(struct pcie_port *pp)
515 return 0; 537 return 0;
516} 538}
517 539
518static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
519{
520 /* Program viewport 0 : OUTBOUND : CFG0 */
521 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
522 PCIE_ATU_VIEWPORT);
523 dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
524 dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
525 dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
526 PCIE_ATU_LIMIT);
527 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
528 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
529 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
530 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
531}
532
533static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
534{
535 /* Program viewport 1 : OUTBOUND : CFG1 */
536 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
537 PCIE_ATU_VIEWPORT);
538 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
539 dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
540 dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
541 dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
542 PCIE_ATU_LIMIT);
543 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
544 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
545 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
546}
547
548static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
549{
550 /* Program viewport 0 : OUTBOUND : MEM */
551 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
552 PCIE_ATU_VIEWPORT);
553 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
554 dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
555 dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
556 dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
557 PCIE_ATU_LIMIT);
558 dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
559 dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
560 PCIE_ATU_UPPER_TARGET);
561 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
562}
563
564static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
565{
566 /* Program viewport 1 : OUTBOUND : IO */
567 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
568 PCIE_ATU_VIEWPORT);
569 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
570 dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
571 dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
572 dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
573 PCIE_ATU_LIMIT);
574 dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
575 dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
576 PCIE_ATU_UPPER_TARGET);
577 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
578}
579
580static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 540static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
581 u32 devfn, int where, int size, u32 *val) 541 u32 devfn, int where, int size, u32 *val)
582{ 542{
583 int ret = PCIBIOS_SUCCESSFUL; 543 int ret, type;
584 u32 address, busdev; 544 u32 address, busdev, cfg_size;
545 u64 cpu_addr;
546 void __iomem *va_cfg_base;
585 547
586 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | 548 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
587 PCIE_ATU_FUNC(PCI_FUNC(devfn)); 549 PCIE_ATU_FUNC(PCI_FUNC(devfn));
588 address = where & ~0x3; 550 address = where & ~0x3;
589 551
590 if (bus->parent->number == pp->root_bus_nr) { 552 if (bus->parent->number == pp->root_bus_nr) {
591 dw_pcie_prog_viewport_cfg0(pp, busdev); 553 type = PCIE_ATU_TYPE_CFG0;
592 ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, 554 cpu_addr = pp->cfg0_mod_base;
593 val); 555 cfg_size = pp->cfg0_size;
594 dw_pcie_prog_viewport_mem_outbound(pp); 556 va_cfg_base = pp->va_cfg0_base;
595 } else { 557 } else {
596 dw_pcie_prog_viewport_cfg1(pp, busdev); 558 type = PCIE_ATU_TYPE_CFG1;
597 ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, 559 cpu_addr = pp->cfg1_mod_base;
598 val); 560 cfg_size = pp->cfg1_size;
599 dw_pcie_prog_viewport_io_outbound(pp); 561 va_cfg_base = pp->va_cfg1_base;
600 } 562 }
601 563
564 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
565 type, cpu_addr,
566 busdev, cfg_size);
567 ret = dw_pcie_cfg_read(va_cfg_base + address, where, size, val);
568 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
569 PCIE_ATU_TYPE_IO, pp->io_mod_base,
570 pp->io_bus_addr, pp->io_size);
571
602 return ret; 572 return ret;
603} 573}
604 574
605static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, 575static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
606 u32 devfn, int where, int size, u32 val) 576 u32 devfn, int where, int size, u32 val)
607{ 577{
608 int ret = PCIBIOS_SUCCESSFUL; 578 int ret, type;
609 u32 address, busdev; 579 u32 address, busdev, cfg_size;
580 u64 cpu_addr;
581 void __iomem *va_cfg_base;
610 582
611 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | 583 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
612 PCIE_ATU_FUNC(PCI_FUNC(devfn)); 584 PCIE_ATU_FUNC(PCI_FUNC(devfn));
613 address = where & ~0x3; 585 address = where & ~0x3;
614 586
615 if (bus->parent->number == pp->root_bus_nr) { 587 if (bus->parent->number == pp->root_bus_nr) {
616 dw_pcie_prog_viewport_cfg0(pp, busdev); 588 type = PCIE_ATU_TYPE_CFG0;
617 ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, 589 cpu_addr = pp->cfg0_mod_base;
618 val); 590 cfg_size = pp->cfg0_size;
619 dw_pcie_prog_viewport_mem_outbound(pp); 591 va_cfg_base = pp->va_cfg0_base;
620 } else { 592 } else {
621 dw_pcie_prog_viewport_cfg1(pp, busdev); 593 type = PCIE_ATU_TYPE_CFG1;
622 ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, 594 cpu_addr = pp->cfg1_mod_base;
623 val); 595 cfg_size = pp->cfg1_size;
624 dw_pcie_prog_viewport_io_outbound(pp); 596 va_cfg_base = pp->va_cfg1_base;
625 } 597 }
626 598
599 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
600 type, cpu_addr,
601 busdev, cfg_size);
602 ret = dw_pcie_cfg_write(va_cfg_base + address, where, size, val);
603 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
604 PCIE_ATU_TYPE_IO, pp->io_mod_base,
605 pp->io_bus_addr, pp->io_size);
606
627 return ret; 607 return ret;
628} 608}
629 609
@@ -728,13 +708,11 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
728 struct pcie_port *pp = sys_to_pcie(sys); 708 struct pcie_port *pp = sys_to_pcie(sys);
729 709
730 pp->root_bus_nr = sys->busnr; 710 pp->root_bus_nr = sys->busnr;
731 bus = pci_create_root_bus(pp->dev, sys->busnr, 711 bus = pci_scan_root_bus(pp->dev, sys->busnr,
732 &dw_pcie_ops, sys, &sys->resources); 712 &dw_pcie_ops, sys, &sys->resources);
733 if (!bus) 713 if (!bus)
734 return NULL; 714 return NULL;
735 715
736 pci_scan_child_bus(bus);
737
738 if (bus && pp->ops->scan_bus) 716 if (bus && pp->ops->scan_bus)
739 pp->ops->scan_bus(pp); 717 pp->ops->scan_bus(pp);
740 718
@@ -778,6 +756,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
778 case 4: 756 case 4:
779 val |= PORT_LINK_MODE_4_LANES; 757 val |= PORT_LINK_MODE_4_LANES;
780 break; 758 break;
759 case 8:
760 val |= PORT_LINK_MODE_8_LANES;
761 break;
781 } 762 }
782 dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); 763 dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
783 764
@@ -794,6 +775,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
794 case 4: 775 case 4:
795 val |= PORT_LOGIC_LINK_WIDTH_4_LANES; 776 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
796 break; 777 break;
778 case 8:
779 val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
780 break;
797 } 781 }
798 dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); 782 dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
799 783
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
new file mode 100644
index 000000000000..96a7d999fd5e
--- /dev/null
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright (C) 2015 Broadcom Corporation
3 * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation version 2.
8 *
9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
10 * kind, whether express or implied; without even the implied warranty
11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/pci.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/phy/phy.h>
20#include <linux/bcma/bcma.h>
21#include <linux/ioport.h>
22
23#include "pcie-iproc.h"
24
25
26/* NS: the CLASS field is R/O and is set to the wrong value (0x200) */
27static void bcma_pcie2_fixup_class(struct pci_dev *dev)
28{
29 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
30}
31DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
32DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
33
34static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
35{
36 struct pci_sys_data *sys = dev->sysdata;
37 struct iproc_pcie *pcie = sys->private_data;
38 struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
39
40 return bcma_core_irq(bdev, 5);
41}
42
43static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
44{
45 struct iproc_pcie *pcie;
46 LIST_HEAD(res);
47 struct resource res_mem;
48 int ret;
49
50 pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
51 if (!pcie)
52 return -ENOMEM;
53
54 pcie->dev = &bdev->dev;
55 bcma_set_drvdata(bdev, pcie);
56
57 pcie->base = bdev->io_addr;
58
59 res_mem.start = bdev->addr_s[0];
60 res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
61 res_mem.name = "PCIe MEM space";
62 res_mem.flags = IORESOURCE_MEM;
63 pci_add_resource(&res, &res_mem);
64
65 pcie->map_irq = iproc_pcie_bcma_map_irq;
66
67 ret = iproc_pcie_setup(pcie, &res);
68 if (ret)
69 dev_err(pcie->dev, "PCIe controller setup failed\n");
70
71 pci_free_resource_list(&res);
72
73 return ret;
74}
75
76static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
77{
78 struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
79
80 iproc_pcie_remove(pcie);
81}
82
83static const struct bcma_device_id iproc_pcie_bcma_table[] = {
84 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
85 {},
86};
87MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table);
88
89static struct bcma_driver iproc_pcie_bcma_driver = {
90 .name = KBUILD_MODNAME,
91 .id_table = iproc_pcie_bcma_table,
92 .probe = iproc_pcie_bcma_probe,
93 .remove = iproc_pcie_bcma_remove,
94};
95
96static int __init iproc_pcie_bcma_init(void)
97{
98 return bcma_driver_register(&iproc_pcie_bcma_driver);
99}
100module_init(iproc_pcie_bcma_init);
101
102static void __exit iproc_pcie_bcma_exit(void)
103{
104 bcma_driver_unregister(&iproc_pcie_bcma_driver);
105}
106module_exit(iproc_pcie_bcma_exit);
107
108MODULE_AUTHOR("Hauke Mehrtens");
109MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
110MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index afad6c21fcfa..9aedc8eb2c6e 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -69,15 +69,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
69 return ret; 69 return ret;
70 } 70 }
71 71
72 pcie->resources = &res; 72 pcie->map_irq = of_irq_parse_and_map_pci;
73 73
74 ret = iproc_pcie_setup(pcie); 74 ret = iproc_pcie_setup(pcie, &res);
75 if (ret) { 75 if (ret)
76 dev_err(pcie->dev, "PCIe controller setup failed\n"); 76 dev_err(pcie->dev, "PCIe controller setup failed\n");
77 return ret;
78 }
79 77
80 return 0; 78 pci_free_resource_list(&res);
79
80 return ret;
81} 81}
82 82
83static int iproc_pcie_pltfm_remove(struct platform_device *pdev) 83static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 329e1b54528b..d77481ea553e 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -183,7 +183,7 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
183 writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); 183 writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN);
184} 184}
185 185
186int iproc_pcie_setup(struct iproc_pcie *pcie) 186int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
187{ 187{
188 int ret; 188 int ret;
189 struct pci_bus *bus; 189 struct pci_bus *bus;
@@ -211,7 +211,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie)
211 pcie->sysdata.private_data = pcie; 211 pcie->sysdata.private_data = pcie;
212 212
213 bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, 213 bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops,
214 &pcie->sysdata, pcie->resources); 214 &pcie->sysdata, res);
215 if (!bus) { 215 if (!bus) {
216 dev_err(pcie->dev, "unable to create PCI root bus\n"); 216 dev_err(pcie->dev, "unable to create PCI root bus\n");
217 ret = -ENOMEM; 217 ret = -ENOMEM;
@@ -229,7 +229,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie)
229 229
230 pci_scan_child_bus(bus); 230 pci_scan_child_bus(bus);
231 pci_assign_unassigned_bus_resources(bus); 231 pci_assign_unassigned_bus_resources(bus);
232 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); 232 pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
233 pci_bus_add_devices(bus); 233 pci_bus_add_devices(bus);
234 234
235 return 0; 235 return 0;
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e28075ed1856..ba0a108309cc 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -29,14 +29,14 @@
29struct iproc_pcie { 29struct iproc_pcie {
30 struct device *dev; 30 struct device *dev;
31 void __iomem *base; 31 void __iomem *base;
32 struct list_head *resources;
33 struct pci_sys_data sysdata; 32 struct pci_sys_data sysdata;
34 struct pci_bus *root_bus; 33 struct pci_bus *root_bus;
35 struct phy *phy; 34 struct phy *phy;
36 int irqs[IPROC_PCIE_MAX_NUM_IRQS]; 35 int irqs[IPROC_PCIE_MAX_NUM_IRQS];
36 int (*map_irq)(const struct pci_dev *, u8, u8);
37}; 37};
38 38
39int iproc_pcie_setup(struct iproc_pcie *pcie); 39int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
40int iproc_pcie_remove(struct iproc_pcie *pcie); 40int iproc_pcie_remove(struct iproc_pcie *pcie);
41 41
42#endif /* _PCIE_IPROC_H */ 42#endif /* _PCIE_IPROC_H */
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 020d78890719..dfec4281bd50 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -146,10 +146,10 @@ struct pcie_app_reg {
146static int spear13xx_pcie_establish_link(struct pcie_port *pp) 146static int spear13xx_pcie_establish_link(struct pcie_port *pp)
147{ 147{
148 u32 val; 148 u32 val;
149 int count = 0;
150 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); 149 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
151 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; 150 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
152 u32 exp_cap_off = EXP_CAP_ID_OFFSET; 151 u32 exp_cap_off = EXP_CAP_ID_OFFSET;
152 unsigned int retries;
153 153
154 if (dw_pcie_link_up(pp)) { 154 if (dw_pcie_link_up(pp)) {
155 dev_err(pp->dev, "link already up\n"); 155 dev_err(pp->dev, "link already up\n");
@@ -201,17 +201,16 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
201 &app_reg->app_ctrl_0); 201 &app_reg->app_ctrl_0);
202 202
203 /* check if the link is up or not */ 203 /* check if the link is up or not */
204 while (!dw_pcie_link_up(pp)) { 204 for (retries = 0; retries < 10; retries++) {
205 mdelay(100); 205 if (dw_pcie_link_up(pp)) {
206 count++; 206 dev_info(pp->dev, "link up\n");
207 if (count == 10) { 207 return 0;
208 dev_err(pp->dev, "link Fail\n");
209 return -EINVAL;
210 } 208 }
209 mdelay(100);
211 } 210 }
212 dev_info(pp->dev, "link up\n");
213 211
214 return 0; 212 dev_err(pp->dev, "link Fail\n");
213 return -EINVAL;
215} 214}
216 215
217static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) 216static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 4a9aa08b08f1..b616e7588ff4 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -61,9 +61,6 @@ pciehp-objs := pciehp_core.o \
61 pciehp_ctrl.o \ 61 pciehp_ctrl.o \
62 pciehp_pci.o \ 62 pciehp_pci.o \
63 pciehp_hpc.o 63 pciehp_hpc.o
64ifdef CONFIG_ACPI
65pciehp-objs += pciehp_acpi.o
66endif
67 64
68shpchp-objs := shpchp_core.o \ 65shpchp-objs := shpchp_core.o \
69 shpchp_ctrl.o \ 66 shpchp_ctrl.o \
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index bcb90e4888dd..ff538568a617 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -632,15 +632,14 @@ static void trim_stale_devices(struct pci_dev *dev)
632{ 632{
633 struct acpi_device *adev = ACPI_COMPANION(&dev->dev); 633 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
634 struct pci_bus *bus = dev->subordinate; 634 struct pci_bus *bus = dev->subordinate;
635 bool alive = false; 635 bool alive = dev->ignore_hotplug;
636 636
637 if (adev) { 637 if (adev) {
638 acpi_status status; 638 acpi_status status;
639 unsigned long long sta; 639 unsigned long long sta;
640 640
641 status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); 641 status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
642 alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) 642 alive = alive || (ACPI_SUCCESS(status) && device_status_valid(sta));
643 || dev->ignore_hotplug;
644 } 643 }
645 if (!alive) 644 if (!alive)
646 alive = pci_device_is_present(dev); 645 alive = pci_device_is_present(dev);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index b11521953485..57cd1327346f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -132,11 +132,7 @@ struct controller {
132 132
133int pciehp_sysfs_enable_slot(struct slot *slot); 133int pciehp_sysfs_enable_slot(struct slot *slot);
134int pciehp_sysfs_disable_slot(struct slot *slot); 134int pciehp_sysfs_disable_slot(struct slot *slot);
135u8 pciehp_handle_attention_button(struct slot *p_slot); 135void pciehp_queue_interrupt_event(struct slot *slot, u32 event_type);
136u8 pciehp_handle_switch_change(struct slot *p_slot);
137u8 pciehp_handle_presence_change(struct slot *p_slot);
138u8 pciehp_handle_power_fault(struct slot *p_slot);
139void pciehp_handle_linkstate_change(struct slot *p_slot);
140int pciehp_configure_device(struct slot *p_slot); 136int pciehp_configure_device(struct slot *p_slot);
141int pciehp_unconfigure_device(struct slot *p_slot); 137int pciehp_unconfigure_device(struct slot *p_slot);
142void pciehp_queue_pushbutton_work(struct work_struct *work); 138void pciehp_queue_pushbutton_work(struct work_struct *work);
@@ -167,21 +163,4 @@ static inline const char *slot_name(struct slot *slot)
167 return hotplug_slot_name(slot->hotplug_slot); 163 return hotplug_slot_name(slot->hotplug_slot);
168} 164}
169 165
170#ifdef CONFIG_ACPI
171#include <linux/pci-acpi.h>
172
173void __init pciehp_acpi_slot_detection_init(void);
174int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
175
176static inline void pciehp_firmware_init(void)
177{
178 pciehp_acpi_slot_detection_init();
179}
180#else
181#define pciehp_firmware_init() do {} while (0)
182static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
183{
184 return 0;
185}
186#endif /* CONFIG_ACPI */
187#endif /* _PCIEHP_H */ 166#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
deleted file mode 100644
index 93cc9266e8cb..000000000000
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * ACPI related functions for PCI Express Hot Plug driver.
3 *
4 * Copyright (C) 2008 Kenji Kaneshige
5 * Copyright (C) 2008 Fujitsu Limited.
6 *
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or (at
12 * your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26#include <linux/acpi.h>
27#include <linux/pci.h>
28#include <linux/pci_hotplug.h>
29#include <linux/slab.h>
30#include <linux/module.h>
31#include "pciehp.h"
32
33#define PCIEHP_DETECT_PCIE (0)
34#define PCIEHP_DETECT_ACPI (1)
35#define PCIEHP_DETECT_AUTO (2)
36#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO
37
38struct dummy_slot {
39 u32 number;
40 struct list_head list;
41};
42
43static int slot_detection_mode;
44static char *pciehp_detect_mode;
45module_param(pciehp_detect_mode, charp, 0444);
46MODULE_PARM_DESC(pciehp_detect_mode,
47 "Slot detection mode: pcie, acpi, auto\n"
48 " pcie - Use PCIe based slot detection\n"
49 " acpi - Use ACPI for slot detection\n"
50 " auto(default) - Auto select mode. Use acpi option if duplicate\n"
51 " slot ids are found. Otherwise, use pcie option\n");
52
53int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
54{
55 if (slot_detection_mode != PCIEHP_DETECT_ACPI)
56 return 0;
57 if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
58 return 0;
59 return -ENODEV;
60}
61
62static int __init parse_detect_mode(void)
63{
64 if (!pciehp_detect_mode)
65 return PCIEHP_DETECT_DEFAULT;
66 if (!strcmp(pciehp_detect_mode, "pcie"))
67 return PCIEHP_DETECT_PCIE;
68 if (!strcmp(pciehp_detect_mode, "acpi"))
69 return PCIEHP_DETECT_ACPI;
70 if (!strcmp(pciehp_detect_mode, "auto"))
71 return PCIEHP_DETECT_AUTO;
72 warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
73 pciehp_detect_mode);
74 return PCIEHP_DETECT_DEFAULT;
75}
76
77static int __initdata dup_slot_id;
78static int __initdata acpi_slot_detected;
79static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
80
81/* Dummy driver for duplicate name detection */
82static int __init dummy_probe(struct pcie_device *dev)
83{
84 u32 slot_cap;
85 acpi_handle handle;
86 struct dummy_slot *slot, *tmp;
87 struct pci_dev *pdev = dev->port;
88
89 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
90 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
91 if (!slot)
92 return -ENOMEM;
93 slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
94 list_for_each_entry(tmp, &dummy_slots, list) {
95 if (tmp->number == slot->number)
96 dup_slot_id++;
97 }
98 list_add_tail(&slot->list, &dummy_slots);
99 handle = ACPI_HANDLE(&pdev->dev);
100 if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
101 acpi_slot_detected = 1;
102 return -ENODEV; /* dummy driver always returns error */
103}
104
105static struct pcie_port_service_driver __initdata dummy_driver = {
106 .name = "pciehp_dummy",
107 .port_type = PCIE_ANY_PORT,
108 .service = PCIE_PORT_SERVICE_HP,
109 .probe = dummy_probe,
110};
111
112static int __init select_detection_mode(void)
113{
114 struct dummy_slot *slot, *tmp;
115
116 if (pcie_port_service_register(&dummy_driver))
117 return PCIEHP_DETECT_ACPI;
118 pcie_port_service_unregister(&dummy_driver);
119 list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
120 list_del(&slot->list);
121 kfree(slot);
122 }
123 if (acpi_slot_detected && dup_slot_id)
124 return PCIEHP_DETECT_ACPI;
125 return PCIEHP_DETECT_PCIE;
126}
127
128void __init pciehp_acpi_slot_detection_init(void)
129{
130 slot_detection_mode = parse_detect_mode();
131 if (slot_detection_mode != PCIEHP_DETECT_AUTO)
132 goto out;
133 slot_detection_mode = select_detection_mode();
134out:
135 if (slot_detection_mode == PCIEHP_DETECT_ACPI)
136 info("Using ACPI for slot detection.\n");
137}
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 07aa722bb12c..612b21a14df5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -77,11 +77,6 @@ static int reset_slot (struct hotplug_slot *slot, int probe);
77 */ 77 */
78static void release_slot(struct hotplug_slot *hotplug_slot) 78static void release_slot(struct hotplug_slot *hotplug_slot)
79{ 79{
80 struct slot *slot = hotplug_slot->private;
81
82 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
83 __func__, hotplug_slot_name(hotplug_slot));
84
85 kfree(hotplug_slot->ops); 80 kfree(hotplug_slot->ops);
86 kfree(hotplug_slot->info); 81 kfree(hotplug_slot->info);
87 kfree(hotplug_slot); 82 kfree(hotplug_slot);
@@ -129,14 +124,10 @@ static int init_slot(struct controller *ctrl)
129 slot->hotplug_slot = hotplug; 124 slot->hotplug_slot = hotplug;
130 snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); 125 snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
131 126
132 ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n",
133 pci_domain_nr(ctrl->pcie->port->subordinate),
134 ctrl->pcie->port->subordinate->number, PSN(ctrl));
135 retval = pci_hp_register(hotplug, 127 retval = pci_hp_register(hotplug,
136 ctrl->pcie->port->subordinate, 0, name); 128 ctrl->pcie->port->subordinate, 0, name);
137 if (retval) 129 if (retval)
138 ctrl_err(ctrl, 130 ctrl_err(ctrl, "pci_hp_register failed: error %d\n", retval);
139 "pci_hp_register failed with error %d\n", retval);
140out: 131out:
141 if (retval) { 132 if (retval) {
142 kfree(ops); 133 kfree(ops);
@@ -158,9 +149,6 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
158{ 149{
159 struct slot *slot = hotplug_slot->private; 150 struct slot *slot = hotplug_slot->private;
160 151
161 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
162 __func__, slot_name(slot));
163
164 pciehp_set_attention_status(slot, status); 152 pciehp_set_attention_status(slot, status);
165 return 0; 153 return 0;
166} 154}
@@ -170,9 +158,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
170{ 158{
171 struct slot *slot = hotplug_slot->private; 159 struct slot *slot = hotplug_slot->private;
172 160
173 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
174 __func__, slot_name(slot));
175
176 return pciehp_sysfs_enable_slot(slot); 161 return pciehp_sysfs_enable_slot(slot);
177} 162}
178 163
@@ -181,9 +166,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
181{ 166{
182 struct slot *slot = hotplug_slot->private; 167 struct slot *slot = hotplug_slot->private;
183 168
184 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
185 __func__, slot_name(slot));
186
187 return pciehp_sysfs_disable_slot(slot); 169 return pciehp_sysfs_disable_slot(slot);
188} 170}
189 171
@@ -191,9 +173,6 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
191{ 173{
192 struct slot *slot = hotplug_slot->private; 174 struct slot *slot = hotplug_slot->private;
193 175
194 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
195 __func__, slot_name(slot));
196
197 pciehp_get_power_status(slot, value); 176 pciehp_get_power_status(slot, value);
198 return 0; 177 return 0;
199} 178}
@@ -202,9 +181,6 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
202{ 181{
203 struct slot *slot = hotplug_slot->private; 182 struct slot *slot = hotplug_slot->private;
204 183
205 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
206 __func__, slot_name(slot));
207
208 pciehp_get_attention_status(slot, value); 184 pciehp_get_attention_status(slot, value);
209 return 0; 185 return 0;
210} 186}
@@ -213,9 +189,6 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
213{ 189{
214 struct slot *slot = hotplug_slot->private; 190 struct slot *slot = hotplug_slot->private;
215 191
216 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
217 __func__, slot_name(slot));
218
219 pciehp_get_latch_status(slot, value); 192 pciehp_get_latch_status(slot, value);
220 return 0; 193 return 0;
221} 194}
@@ -224,9 +197,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
224{ 197{
225 struct slot *slot = hotplug_slot->private; 198 struct slot *slot = hotplug_slot->private;
226 199
227 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
228 __func__, slot_name(slot));
229
230 pciehp_get_adapter_status(slot, value); 200 pciehp_get_adapter_status(slot, value);
231 return 0; 201 return 0;
232} 202}
@@ -235,9 +205,6 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
235{ 205{
236 struct slot *slot = hotplug_slot->private; 206 struct slot *slot = hotplug_slot->private;
237 207
238 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
239 __func__, slot_name(slot));
240
241 return pciehp_reset_slot(slot, probe); 208 return pciehp_reset_slot(slot, probe);
242} 209}
243 210
@@ -248,24 +215,21 @@ static int pciehp_probe(struct pcie_device *dev)
248 struct slot *slot; 215 struct slot *slot;
249 u8 occupied, poweron; 216 u8 occupied, poweron;
250 217
251 if (pciehp_force) 218 /* If this is not a "hotplug" service, we have no business here. */
252 dev_info(&dev->device, 219 if (dev->service != PCIE_PORT_SERVICE_HP)
253 "Bypassing BIOS check for pciehp use on %s\n", 220 return -ENODEV;
254 pci_name(dev->port));
255 else if (pciehp_acpi_slot_detection_check(dev->port))
256 goto err_out_none;
257 221
258 if (!dev->port->subordinate) { 222 if (!dev->port->subordinate) {
259 /* Can happen if we run out of bus numbers during probe */ 223 /* Can happen if we run out of bus numbers during probe */
260 dev_err(&dev->device, 224 dev_err(&dev->device,
261 "Hotplug bridge without secondary bus, ignoring\n"); 225 "Hotplug bridge without secondary bus, ignoring\n");
262 goto err_out_none; 226 return -ENODEV;
263 } 227 }
264 228
265 ctrl = pcie_init(dev); 229 ctrl = pcie_init(dev);
266 if (!ctrl) { 230 if (!ctrl) {
267 dev_err(&dev->device, "Controller initialization failed\n"); 231 dev_err(&dev->device, "Controller initialization failed\n");
268 goto err_out_none; 232 return -ENODEV;
269 } 233 }
270 set_service_data(dev, ctrl); 234 set_service_data(dev, ctrl);
271 235
@@ -275,14 +239,14 @@ static int pciehp_probe(struct pcie_device *dev)
275 if (rc == -EBUSY) 239 if (rc == -EBUSY)
276 ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n"); 240 ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n");
277 else 241 else
278 ctrl_err(ctrl, "Slot initialization failed\n"); 242 ctrl_err(ctrl, "Slot initialization failed (%d)\n", rc);
279 goto err_out_release_ctlr; 243 goto err_out_release_ctlr;
280 } 244 }
281 245
282 /* Enable events after we have setup the data structures */ 246 /* Enable events after we have setup the data structures */
283 rc = pcie_init_notification(ctrl); 247 rc = pcie_init_notification(ctrl);
284 if (rc) { 248 if (rc) {
285 ctrl_err(ctrl, "Notification initialization failed\n"); 249 ctrl_err(ctrl, "Notification initialization failed (%d)\n", rc);
286 goto err_out_free_ctrl_slot; 250 goto err_out_free_ctrl_slot;
287 } 251 }
288 252
@@ -305,7 +269,6 @@ err_out_free_ctrl_slot:
305 cleanup_slot(ctrl); 269 cleanup_slot(ctrl);
306err_out_release_ctlr: 270err_out_release_ctlr:
307 pciehp_release_ctrl(ctrl); 271 pciehp_release_ctrl(ctrl);
308err_out_none:
309 return -ENODEV; 272 return -ENODEV;
310} 273}
311 274
@@ -366,7 +329,6 @@ static int __init pcied_init(void)
366{ 329{
367 int retval = 0; 330 int retval = 0;
368 331
369 pciehp_firmware_init();
370 retval = pcie_port_service_register(&hpdriver_portdrv); 332 retval = pcie_port_service_register(&hpdriver_portdrv);
371 dbg("pcie_port_service_register = %d\n", retval); 333 dbg("pcie_port_service_register = %d\n", retval);
372 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 334 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index f052e951b23e..f3796124ad7c 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -37,138 +37,20 @@
37 37
38static void interrupt_event_handler(struct work_struct *work); 38static void interrupt_event_handler(struct work_struct *work);
39 39
40static int queue_interrupt_event(struct slot *p_slot, u32 event_type) 40void pciehp_queue_interrupt_event(struct slot *p_slot, u32 event_type)
41{ 41{
42 struct event_info *info; 42 struct event_info *info;
43 43
44 info = kmalloc(sizeof(*info), GFP_ATOMIC); 44 info = kmalloc(sizeof(*info), GFP_ATOMIC);
45 if (!info) 45 if (!info) {
46 return -ENOMEM; 46 ctrl_err(p_slot->ctrl, "dropped event %d (ENOMEM)\n", event_type);
47 return;
48 }
47 49
50 INIT_WORK(&info->work, interrupt_event_handler);
48 info->event_type = event_type; 51 info->event_type = event_type;
49 info->p_slot = p_slot; 52 info->p_slot = p_slot;
50 INIT_WORK(&info->work, interrupt_event_handler);
51
52 queue_work(p_slot->wq, &info->work); 53 queue_work(p_slot->wq, &info->work);
53
54 return 0;
55}
56
57u8 pciehp_handle_attention_button(struct slot *p_slot)
58{
59 u32 event_type;
60 struct controller *ctrl = p_slot->ctrl;
61
62 /* Attention Button Change */
63 ctrl_dbg(ctrl, "Attention button interrupt received\n");
64
65 /*
66 * Button pressed - See if need to TAKE ACTION!!!
67 */
68 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
69 event_type = INT_BUTTON_PRESS;
70
71 queue_interrupt_event(p_slot, event_type);
72
73 return 0;
74}
75
76u8 pciehp_handle_switch_change(struct slot *p_slot)
77{
78 u8 getstatus;
79 u32 event_type;
80 struct controller *ctrl = p_slot->ctrl;
81
82 /* Switch Change */
83 ctrl_dbg(ctrl, "Switch interrupt received\n");
84
85 pciehp_get_latch_status(p_slot, &getstatus);
86 if (getstatus) {
87 /*
88 * Switch opened
89 */
90 ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
91 event_type = INT_SWITCH_OPEN;
92 } else {
93 /*
94 * Switch closed
95 */
96 ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
97 event_type = INT_SWITCH_CLOSE;
98 }
99
100 queue_interrupt_event(p_slot, event_type);
101
102 return 1;
103}
104
105u8 pciehp_handle_presence_change(struct slot *p_slot)
106{
107 u32 event_type;
108 u8 presence_save;
109 struct controller *ctrl = p_slot->ctrl;
110
111 /* Presence Change */
112 ctrl_dbg(ctrl, "Presence/Notify input change\n");
113
114 /* Switch is open, assume a presence change
115 * Save the presence state
116 */
117 pciehp_get_adapter_status(p_slot, &presence_save);
118 if (presence_save) {
119 /*
120 * Card Present
121 */
122 ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
123 event_type = INT_PRESENCE_ON;
124 } else {
125 /*
126 * Not Present
127 */
128 ctrl_info(ctrl, "Card not present on Slot(%s)\n",
129 slot_name(p_slot));
130 event_type = INT_PRESENCE_OFF;
131 }
132
133 queue_interrupt_event(p_slot, event_type);
134
135 return 1;
136}
137
138u8 pciehp_handle_power_fault(struct slot *p_slot)
139{
140 u32 event_type;
141 struct controller *ctrl = p_slot->ctrl;
142
143 /* power fault */
144 ctrl_dbg(ctrl, "Power fault interrupt received\n");
145 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
146 event_type = INT_POWER_FAULT;
147 ctrl_info(ctrl, "Power fault bit %x set\n", 0);
148 queue_interrupt_event(p_slot, event_type);
149
150 return 1;
151}
152
153void pciehp_handle_linkstate_change(struct slot *p_slot)
154{
155 u32 event_type;
156 struct controller *ctrl = p_slot->ctrl;
157
158 /* Link Status Change */
159 ctrl_dbg(ctrl, "Data Link Layer State change\n");
160
161 if (pciehp_check_link_active(ctrl)) {
162 ctrl_info(ctrl, "slot(%s): Link Up event\n",
163 slot_name(p_slot));
164 event_type = INT_LINK_UP;
165 } else {
166 ctrl_info(ctrl, "slot(%s): Link Down event\n",
167 slot_name(p_slot));
168 event_type = INT_LINK_DOWN;
169 }
170
171 queue_interrupt_event(p_slot, event_type);
172} 54}
173 55
174/* The following routines constitute the bulk of the 56/* The following routines constitute the bulk of the
@@ -298,10 +180,6 @@ static void pciehp_power_thread(struct work_struct *work)
298 180
299 switch (info->req) { 181 switch (info->req) {
300 case DISABLE_REQ: 182 case DISABLE_REQ:
301 ctrl_dbg(p_slot->ctrl,
302 "Disabling domain:bus:device=%04x:%02x:00\n",
303 pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
304 p_slot->ctrl->pcie->port->subordinate->number);
305 mutex_lock(&p_slot->hotplug_lock); 183 mutex_lock(&p_slot->hotplug_lock);
306 pciehp_disable_slot(p_slot); 184 pciehp_disable_slot(p_slot);
307 mutex_unlock(&p_slot->hotplug_lock); 185 mutex_unlock(&p_slot->hotplug_lock);
@@ -310,10 +188,6 @@ static void pciehp_power_thread(struct work_struct *work)
310 mutex_unlock(&p_slot->lock); 188 mutex_unlock(&p_slot->lock);
311 break; 189 break;
312 case ENABLE_REQ: 190 case ENABLE_REQ:
313 ctrl_dbg(p_slot->ctrl,
314 "Enabling domain:bus:device=%04x:%02x:00\n",
315 pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
316 p_slot->ctrl->pcie->port->subordinate->number);
317 mutex_lock(&p_slot->hotplug_lock); 191 mutex_lock(&p_slot->hotplug_lock);
318 ret = pciehp_enable_slot(p_slot); 192 ret = pciehp_enable_slot(p_slot);
319 mutex_unlock(&p_slot->hotplug_lock); 193 mutex_unlock(&p_slot->hotplug_lock);
@@ -416,7 +290,7 @@ static void handle_button_press_event(struct slot *p_slot)
416 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); 290 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
417 break; 291 break;
418 default: 292 default:
419 ctrl_warn(ctrl, "Not a valid state\n"); 293 ctrl_warn(ctrl, "ignoring invalid state %#x\n", p_slot->state);
420 break; 294 break;
421 } 295 }
422} 296}
@@ -507,8 +381,8 @@ static void handle_link_event(struct slot *p_slot, u32 event)
507 } 381 }
508 break; 382 break;
509 default: 383 default:
510 ctrl_err(ctrl, "Not a valid state on slot(%s)\n", 384 ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
511 slot_name(p_slot)); 385 p_slot->state, slot_name(p_slot));
512 kfree(info); 386 kfree(info);
513 break; 387 break;
514 } 388 }
@@ -532,7 +406,6 @@ static void interrupt_event_handler(struct work_struct *work)
532 pciehp_green_led_off(p_slot); 406 pciehp_green_led_off(p_slot);
533 break; 407 break;
534 case INT_PRESENCE_ON: 408 case INT_PRESENCE_ON:
535 ctrl_dbg(ctrl, "Surprise Insertion\n");
536 handle_surprise_event(p_slot); 409 handle_surprise_event(p_slot);
537 break; 410 break;
538 case INT_PRESENCE_OFF: 411 case INT_PRESENCE_OFF:
@@ -540,7 +413,6 @@ static void interrupt_event_handler(struct work_struct *work)
540 * Regardless of surprise capability, we need to 413 * Regardless of surprise capability, we need to
541 * definitely remove a card that has been pulled out! 414 * definitely remove a card that has been pulled out!
542 */ 415 */
543 ctrl_dbg(ctrl, "Surprise Removal\n");
544 handle_surprise_event(p_slot); 416 handle_surprise_event(p_slot);
545 break; 417 break;
546 case INT_LINK_UP: 418 case INT_LINK_UP:
@@ -647,8 +519,8 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
647 slot_name(p_slot)); 519 slot_name(p_slot));
648 break; 520 break;
649 default: 521 default:
650 ctrl_err(ctrl, "Not a valid state on slot %s\n", 522 ctrl_err(ctrl, "invalid state %#x on slot %s\n",
651 slot_name(p_slot)); 523 p_slot->state, slot_name(p_slot));
652 break; 524 break;
653 } 525 }
654 mutex_unlock(&p_slot->lock); 526 mutex_unlock(&p_slot->lock);
@@ -682,8 +554,8 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
682 slot_name(p_slot)); 554 slot_name(p_slot));
683 break; 555 break;
684 default: 556 default:
685 ctrl_err(ctrl, "Not a valid state on slot %s\n", 557 ctrl_err(ctrl, "invalid state %#x on slot %s\n",
686 slot_name(p_slot)); 558 p_slot->state, slot_name(p_slot));
687 break; 559 break;
688 } 560 }
689 mutex_unlock(&p_slot->lock); 561 mutex_unlock(&p_slot->lock);
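For context on the pciehp_ctrl.c rework: queueing an event can no longer fail from the caller's point of view, so the per-event handlers that used to wrap queue_interrupt_event() go away and the ISR calls pciehp_queue_interrupt_event() directly (see the pciehp_hpc.c hunk below). A minimal, hedged sketch of a caller; the wrapper function itself is illustrative, only the queueing call and event name come from the patch:

static void example_report_button_press(struct slot *p_slot)
{
	ctrl_info(p_slot->ctrl, "Button pressed on Slot(%s)\n",
		  slot_name(p_slot));
	/* No return value to check: on allocation failure the event is
	 * logged and dropped inside pciehp_queue_interrupt_event(). */
	pciehp_queue_interrupt_event(p_slot, INT_BUTTON_PRESS);
}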
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 0ebf754fc177..2913f7e68a10 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -176,20 +176,17 @@ static void pcie_wait_cmd(struct controller *ctrl)
176 jiffies_to_msecs(jiffies - ctrl->cmd_started)); 176 jiffies_to_msecs(jiffies - ctrl->cmd_started));
177} 177}
178 178
179/** 179static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
180 * pcie_write_cmd - Issue controller command 180 u16 mask, bool wait)
181 * @ctrl: controller to which the command is issued
182 * @cmd: command value written to slot control register
183 * @mask: bitmask of slot control register to be modified
184 */
185static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
186{ 181{
187 struct pci_dev *pdev = ctrl_dev(ctrl); 182 struct pci_dev *pdev = ctrl_dev(ctrl);
188 u16 slot_ctrl; 183 u16 slot_ctrl;
189 184
190 mutex_lock(&ctrl->ctrl_lock); 185 mutex_lock(&ctrl->ctrl_lock);
191 186
192 /* Wait for any previous command that might still be in progress */ 187 /*
188 * Always wait for any previous command that might still be in progress
189 */
193 pcie_wait_cmd(ctrl); 190 pcie_wait_cmd(ctrl);
194 191
195 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); 192 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
@@ -201,9 +198,33 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
201 ctrl->cmd_started = jiffies; 198 ctrl->cmd_started = jiffies;
202 ctrl->slot_ctrl = slot_ctrl; 199 ctrl->slot_ctrl = slot_ctrl;
203 200
201 /*
202 * Optionally wait for the hardware to be ready for a new command,
203 * indicating completion of the above issued command.
204 */
205 if (wait)
206 pcie_wait_cmd(ctrl);
207
204 mutex_unlock(&ctrl->ctrl_lock); 208 mutex_unlock(&ctrl->ctrl_lock);
205} 209}
206 210
211/**
212 * pcie_write_cmd - Issue controller command
213 * @ctrl: controller to which the command is issued
214 * @cmd: command value written to slot control register
215 * @mask: bitmask of slot control register to be modified
216 */
217static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
218{
219 pcie_do_write_cmd(ctrl, cmd, mask, true);
220}
221
222/* Same as above without waiting for the hardware to latch */
223static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
224{
225 pcie_do_write_cmd(ctrl, cmd, mask, false);
226}
227
207bool pciehp_check_link_active(struct controller *ctrl) 228bool pciehp_check_link_active(struct controller *ctrl)
208{ 229{
209 struct pci_dev *pdev = ctrl_dev(ctrl); 230 struct pci_dev *pdev = ctrl_dev(ctrl);
@@ -291,7 +312,8 @@ int pciehp_check_link_status(struct controller *ctrl)
291 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); 312 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
292 if ((lnk_status & PCI_EXP_LNKSTA_LT) || 313 if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
293 !(lnk_status & PCI_EXP_LNKSTA_NLW)) { 314 !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
294 ctrl_err(ctrl, "Link Training Error occurs\n"); 315 ctrl_err(ctrl, "link training error: status %#06x\n",
316 lnk_status);
295 return -1; 317 return -1;
296 } 318 }
297 319
@@ -422,7 +444,7 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
422 default: 444 default:
423 return; 445 return;
424 } 446 }
425 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); 447 pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
426 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 448 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
427 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 449 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
428} 450}
@@ -434,7 +456,8 @@ void pciehp_green_led_on(struct slot *slot)
434 if (!PWR_LED(ctrl)) 456 if (!PWR_LED(ctrl))
435 return; 457 return;
436 458
437 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC); 459 pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
460 PCI_EXP_SLTCTL_PIC);
438 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 461 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
439 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 462 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
440 PCI_EXP_SLTCTL_PWR_IND_ON); 463 PCI_EXP_SLTCTL_PWR_IND_ON);
@@ -447,7 +470,8 @@ void pciehp_green_led_off(struct slot *slot)
447 if (!PWR_LED(ctrl)) 470 if (!PWR_LED(ctrl))
448 return; 471 return;
449 472
450 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC); 473 pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
474 PCI_EXP_SLTCTL_PIC);
451 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 475 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
452 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 476 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
453 PCI_EXP_SLTCTL_PWR_IND_OFF); 477 PCI_EXP_SLTCTL_PWR_IND_OFF);
@@ -460,7 +484,8 @@ void pciehp_green_led_blink(struct slot *slot)
460 if (!PWR_LED(ctrl)) 484 if (!PWR_LED(ctrl))
461 return; 485 return;
462 486
463 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC); 487 pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
488 PCI_EXP_SLTCTL_PIC);
464 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 489 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
465 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 490 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
466 PCI_EXP_SLTCTL_PWR_IND_BLINK); 491 PCI_EXP_SLTCTL_PWR_IND_BLINK);
@@ -510,6 +535,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
510 struct pci_dev *dev; 535 struct pci_dev *dev;
511 struct slot *slot = ctrl->slot; 536 struct slot *slot = ctrl->slot;
512 u16 detected, intr_loc; 537 u16 detected, intr_loc;
538 u8 open, present;
539 bool link;
513 540
514 /* 541 /*
515 * In order to guarantee that all interrupt events are 542 * In order to guarantee that all interrupt events are
@@ -532,7 +559,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
532 intr_loc); 559 intr_loc);
533 } while (detected); 560 } while (detected);
534 561
535 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); 562 ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", intr_loc);
536 563
537 /* Check Command Complete Interrupt Pending */ 564 /* Check Command Complete Interrupt Pending */
538 if (intr_loc & PCI_EXP_SLTSTA_CC) { 565 if (intr_loc & PCI_EXP_SLTSTA_CC) {
@@ -555,25 +582,44 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
555 return IRQ_HANDLED; 582 return IRQ_HANDLED;
556 583
557 /* Check MRL Sensor Changed */ 584 /* Check MRL Sensor Changed */
558 if (intr_loc & PCI_EXP_SLTSTA_MRLSC) 585 if (intr_loc & PCI_EXP_SLTSTA_MRLSC) {
559 pciehp_handle_switch_change(slot); 586 pciehp_get_latch_status(slot, &open);
587 ctrl_info(ctrl, "Latch %s on Slot(%s)\n",
588 open ? "open" : "close", slot_name(slot));
589 pciehp_queue_interrupt_event(slot, open ? INT_SWITCH_OPEN :
590 INT_SWITCH_CLOSE);
591 }
560 592
561 /* Check Attention Button Pressed */ 593 /* Check Attention Button Pressed */
562 if (intr_loc & PCI_EXP_SLTSTA_ABP) 594 if (intr_loc & PCI_EXP_SLTSTA_ABP) {
563 pciehp_handle_attention_button(slot); 595 ctrl_info(ctrl, "Button pressed on Slot(%s)\n",
596 slot_name(slot));
597 pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
598 }
564 599
565 /* Check Presence Detect Changed */ 600 /* Check Presence Detect Changed */
566 if (intr_loc & PCI_EXP_SLTSTA_PDC) 601 if (intr_loc & PCI_EXP_SLTSTA_PDC) {
567 pciehp_handle_presence_change(slot); 602 pciehp_get_adapter_status(slot, &present);
603 ctrl_info(ctrl, "Card %spresent on Slot(%s)\n",
604 present ? "" : "not ", slot_name(slot));
605 pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON :
606 INT_PRESENCE_OFF);
607 }
568 608
569 /* Check Power Fault Detected */ 609 /* Check Power Fault Detected */
570 if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { 610 if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
571 ctrl->power_fault_detected = 1; 611 ctrl->power_fault_detected = 1;
572 pciehp_handle_power_fault(slot); 612 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(slot));
613 pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
573 } 614 }
574 615
575 if (intr_loc & PCI_EXP_SLTSTA_DLLSC) 616 if (intr_loc & PCI_EXP_SLTSTA_DLLSC) {
576 pciehp_handle_linkstate_change(slot); 617 link = pciehp_check_link_active(ctrl);
618 ctrl_info(ctrl, "slot(%s): Link %s event\n",
619 slot_name(slot), link ? "Up" : "Down");
620 pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
621 INT_LINK_DOWN);
622 }
577 623
578 return IRQ_HANDLED; 624 return IRQ_HANDLED;
579} 625}
@@ -613,7 +659,7 @@ void pcie_enable_notification(struct controller *ctrl)
613 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | 659 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
614 PCI_EXP_SLTCTL_DLLSCE); 660 PCI_EXP_SLTCTL_DLLSCE);
615 661
616 pcie_write_cmd(ctrl, cmd, mask); 662 pcie_write_cmd_nowait(ctrl, cmd, mask);
617 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 663 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
618 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); 664 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
619} 665}
@@ -664,7 +710,7 @@ int pciehp_reset_slot(struct slot *slot, int probe)
664 pci_reset_bridge_secondary_bus(ctrl->pcie->port); 710 pci_reset_bridge_secondary_bus(ctrl->pcie->port);
665 711
666 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); 712 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
667 pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask); 713 pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
668 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 714 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
669 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask); 715 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
670 if (pciehp_poll_mode) 716 if (pciehp_poll_mode)
@@ -724,48 +770,13 @@ static void pcie_cleanup_slot(struct controller *ctrl)
724 770
725static inline void dbg_ctrl(struct controller *ctrl) 771static inline void dbg_ctrl(struct controller *ctrl)
726{ 772{
727 int i;
728 u16 reg16;
729 struct pci_dev *pdev = ctrl->pcie->port; 773 struct pci_dev *pdev = ctrl->pcie->port;
774 u16 reg16;
730 775
731 if (!pciehp_debug) 776 if (!pciehp_debug)
732 return; 777 return;
733 778
734 ctrl_info(ctrl, "Hotplug Controller:\n");
735 ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
736 pci_name(pdev), pdev->irq);
737 ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
738 ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
739 ctrl_info(ctrl, " Subsystem ID : 0x%04x\n",
740 pdev->subsystem_device);
741 ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
742 pdev->subsystem_vendor);
743 ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n",
744 pci_pcie_cap(pdev));
745 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
746 if (!pci_resource_len(pdev, i))
747 continue;
748 ctrl_info(ctrl, " PCI resource [%d] : %pR\n",
749 i, &pdev->resource[i]);
750 }
751 ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); 779 ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
752 ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
753 ctrl_info(ctrl, " Attention Button : %3s\n",
754 ATTN_BUTTN(ctrl) ? "yes" : "no");
755 ctrl_info(ctrl, " Power Controller : %3s\n",
756 POWER_CTRL(ctrl) ? "yes" : "no");
757 ctrl_info(ctrl, " MRL Sensor : %3s\n",
758 MRL_SENS(ctrl) ? "yes" : "no");
759 ctrl_info(ctrl, " Attention Indicator : %3s\n",
760 ATTN_LED(ctrl) ? "yes" : "no");
761 ctrl_info(ctrl, " Power Indicator : %3s\n",
762 PWR_LED(ctrl) ? "yes" : "no");
763 ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
764 HP_SUPR_RM(ctrl) ? "yes" : "no");
765 ctrl_info(ctrl, " EMI Present : %3s\n",
766 EMI(ctrl) ? "yes" : "no");
767 ctrl_info(ctrl, " Command Completed : %3s\n",
768 NO_CMD_CMPL(ctrl) ? "no" : "yes");
769 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16); 780 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
770 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); 781 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
771 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16); 782 pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
@@ -794,10 +805,8 @@ struct controller *pcie_init(struct pcie_device *dev)
794 805
795 /* Check if Data Link Layer Link Active Reporting is implemented */ 806 /* Check if Data Link Layer Link Active Reporting is implemented */
796 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); 807 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
797 if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { 808 if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
798 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
799 ctrl->link_active_reporting = 1; 809 ctrl->link_active_reporting = 1;
800 }
801 810
802 /* Clear all remaining event bits in Slot Status register */ 811 /* Clear all remaining event bits in Slot Status register */
803 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, 812 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -805,13 +814,15 @@ struct controller *pcie_init(struct pcie_device *dev)
805 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 814 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
806 PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); 815 PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
807 816
808 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n", 817 ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
809 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, 818 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
810 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), 819 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
811 FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
812 FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
813 FLAG(slot_cap, PCI_EXP_SLTCAP_PCP), 820 FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
814 FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP), 821 FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
822 FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
823 FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
824 FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
825 FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
815 FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), 826 FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
816 FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), 827 FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
817 FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC)); 828 FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
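A rough illustration of the new command-writer pair in pciehp_hpc.c: both funnel into pcie_do_write_cmd(), which always waits for any previously issued command first, and only the plain pcie_write_cmd() additionally waits for the command it just wrote. The calling pattern, sketched on the assumption that the caller already holds a valid controller (the example function is illustrative; the register macros are the usual ones from pci_regs.h):

static void example_led_then_power(struct controller *ctrl)
{
	/* Indicator updates are fire-and-forget, so skip the completion
	 * wait and let the next command writer absorb it instead. */
	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
			      PCI_EXP_SLTCTL_PIC);

	/* Power transitions keep the waiting variant so later steps only
	 * run once the controller has latched the command. */
	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
}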
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index c3e7dfcf9ff5..f66be868ad21 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -185,27 +185,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev)
185 return default_restore_msi_irqs(dev); 185 return default_restore_msi_irqs(dev);
186} 186}
187 187
188static void msi_set_enable(struct pci_dev *dev, int enable)
189{
190 u16 control;
191
192 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
193 control &= ~PCI_MSI_FLAGS_ENABLE;
194 if (enable)
195 control |= PCI_MSI_FLAGS_ENABLE;
196 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
197}
198
199static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
200{
201 u16 ctrl;
202
203 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
204 ctrl &= ~clear;
205 ctrl |= set;
206 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
207}
208
209static inline __attribute_const__ u32 msi_mask(unsigned x) 188static inline __attribute_const__ u32 msi_mask(unsigned x)
210{ 189{
211 /* Don't shift by >= width of type */ 190 /* Don't shift by >= width of type */
@@ -452,7 +431,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
452 entry = irq_get_msi_desc(dev->irq); 431 entry = irq_get_msi_desc(dev->irq);
453 432
454 pci_intx_for_msi(dev, 0); 433 pci_intx_for_msi(dev, 0);
455 msi_set_enable(dev, 0); 434 pci_msi_set_enable(dev, 0);
456 arch_restore_msi_irqs(dev); 435 arch_restore_msi_irqs(dev);
457 436
458 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 437 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
@@ -473,14 +452,14 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
473 452
474 /* route the table */ 453 /* route the table */
475 pci_intx_for_msi(dev, 0); 454 pci_intx_for_msi(dev, 0);
476 msix_clear_and_set_ctrl(dev, 0, 455 pci_msix_clear_and_set_ctrl(dev, 0,
477 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); 456 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
478 457
479 arch_restore_msi_irqs(dev); 458 arch_restore_msi_irqs(dev);
480 list_for_each_entry(entry, &dev->msi_list, list) 459 list_for_each_entry(entry, &dev->msi_list, list)
481 msix_mask_irq(entry, entry->masked); 460 msix_mask_irq(entry, entry->masked);
482 461
483 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); 462 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
484} 463}
485 464
486void pci_restore_msi_state(struct pci_dev *dev) 465void pci_restore_msi_state(struct pci_dev *dev)
@@ -647,7 +626,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
647 int ret; 626 int ret;
648 unsigned mask; 627 unsigned mask;
649 628
650 msi_set_enable(dev, 0); /* Disable MSI during set up */ 629 pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
651 630
652 entry = msi_setup_entry(dev, nvec); 631 entry = msi_setup_entry(dev, nvec);
653 if (!entry) 632 if (!entry)
@@ -683,7 +662,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
683 662
684 /* Set MSI enabled bits */ 663 /* Set MSI enabled bits */
685 pci_intx_for_msi(dev, 0); 664 pci_intx_for_msi(dev, 0);
686 msi_set_enable(dev, 1); 665 pci_msi_set_enable(dev, 1);
687 dev->msi_enabled = 1; 666 dev->msi_enabled = 1;
688 667
689 dev->irq = entry->irq; 668 dev->irq = entry->irq;
@@ -775,7 +754,7 @@ static int msix_capability_init(struct pci_dev *dev,
775 void __iomem *base; 754 void __iomem *base;
776 755
777 /* Ensure MSI-X is disabled while it is set up */ 756 /* Ensure MSI-X is disabled while it is set up */
778 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 757 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
779 758
780 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); 759 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
781 /* Request & Map MSI-X table region */ 760 /* Request & Map MSI-X table region */
@@ -801,7 +780,7 @@ static int msix_capability_init(struct pci_dev *dev,
801 * MSI-X registers. We need to mask all the vectors to prevent 780 * MSI-X registers. We need to mask all the vectors to prevent
802 * interrupts coming in before they're fully set up. 781 * interrupts coming in before they're fully set up.
803 */ 782 */
804 msix_clear_and_set_ctrl(dev, 0, 783 pci_msix_clear_and_set_ctrl(dev, 0,
805 PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); 784 PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
806 785
807 msix_program_entries(dev, entries); 786 msix_program_entries(dev, entries);
@@ -814,7 +793,7 @@ static int msix_capability_init(struct pci_dev *dev,
814 pci_intx_for_msi(dev, 0); 793 pci_intx_for_msi(dev, 0);
815 dev->msix_enabled = 1; 794 dev->msix_enabled = 1;
816 795
817 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); 796 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
818 797
819 return 0; 798 return 0;
820 799
@@ -919,7 +898,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
919 BUG_ON(list_empty(&dev->msi_list)); 898 BUG_ON(list_empty(&dev->msi_list));
920 desc = list_first_entry(&dev->msi_list, struct msi_desc, list); 899 desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
921 900
922 msi_set_enable(dev, 0); 901 pci_msi_set_enable(dev, 0);
923 pci_intx_for_msi(dev, 1); 902 pci_intx_for_msi(dev, 1);
924 dev->msi_enabled = 0; 903 dev->msi_enabled = 0;
925 904
@@ -1027,7 +1006,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
1027 __pci_msix_desc_mask_irq(entry, 1); 1006 __pci_msix_desc_mask_irq(entry, 1);
1028 } 1007 }
1029 1008
1030 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); 1009 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1031 pci_intx_for_msi(dev, 1); 1010 pci_intx_for_msi(dev, 1);
1032 dev->msix_enabled = 0; 1011 dev->msix_enabled = 0;
1033} 1012}
@@ -1062,18 +1041,6 @@ EXPORT_SYMBOL(pci_msi_enabled);
1062void pci_msi_init_pci_dev(struct pci_dev *dev) 1041void pci_msi_init_pci_dev(struct pci_dev *dev)
1063{ 1042{
1064 INIT_LIST_HEAD(&dev->msi_list); 1043 INIT_LIST_HEAD(&dev->msi_list);
1065
1066 /* Disable the msi hardware to avoid screaming interrupts
1067 * during boot. This is the power on reset default so
1068 * usually this should be a noop.
1069 */
1070 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1071 if (dev->msi_cap)
1072 msi_set_enable(dev, 0);
1073
1074 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1075 if (dev->msix_cap)
1076 msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1077} 1044}
1078 1045
1079/** 1046/**
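The two register helpers deleted from msi.c are not removed outright; they resurface as pci_msi_set_enable() and pci_msix_clear_and_set_ctrl() in drivers/pci/pci.h further down, where both msi.c and the enumeration path in probe.c can reach them. A minimal usage sketch of the MSI-X variant, mirroring the restore path above and assuming dev->msix_cap is valid:

static void example_msix_quiesce(struct pci_dev *dev)
{
	/* Mask every vector (and enable the function) before touching
	 * the MSI-X table... */
	pci_msix_clear_and_set_ctrl(dev, 0,
		PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	/* ...reprogram the vectors here... */

	/* ...then drop the global mask once the table is consistent. */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}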
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index acc4b6ef78c4..0008c950452c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3101,39 +3101,6 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
3101} 3101}
3102EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); 3102EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3103 3103
3104/**
3105 * pci_msi_off - disables any MSI or MSI-X capabilities
3106 * @dev: the PCI device to operate on
3107 *
3108 * If you want to use MSI, see pci_enable_msi() and friends.
3109 * This is a lower-level primitive that allows us to disable
3110 * MSI operation at the device level.
3111 */
3112void pci_msi_off(struct pci_dev *dev)
3113{
3114 int pos;
3115 u16 control;
3116
3117 /*
3118 * This looks like it could go in msi.c, but we need it even when
3119 * CONFIG_PCI_MSI=n. For the same reason, we can't use
3120 * dev->msi_cap or dev->msix_cap here.
3121 */
3122 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3123 if (pos) {
3124 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3125 control &= ~PCI_MSI_FLAGS_ENABLE;
3126 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3127 }
3128 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3129 if (pos) {
3130 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3131 control &= ~PCI_MSIX_FLAGS_ENABLE;
3132 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3133 }
3134}
3135EXPORT_SYMBOL_GPL(pci_msi_off);
3136
3137int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) 3104int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3138{ 3105{
3139 return dma_set_max_seg_size(&dev->dev, size); 3106 return dma_set_max_seg_size(&dev->dev, size);
@@ -4324,6 +4291,17 @@ bool pci_device_is_present(struct pci_dev *pdev)
4324} 4291}
4325EXPORT_SYMBOL_GPL(pci_device_is_present); 4292EXPORT_SYMBOL_GPL(pci_device_is_present);
4326 4293
4294void pci_ignore_hotplug(struct pci_dev *dev)
4295{
4296 struct pci_dev *bridge = dev->bus->self;
4297
4298 dev->ignore_hotplug = 1;
4299 /* Propagate the "ignore hotplug" setting to the parent bridge. */
4300 if (bridge)
4301 bridge->ignore_hotplug = 1;
4302}
4303EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
4304
4327#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 4305#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4328static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 4306static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4329static DEFINE_SPINLOCK(resource_alignment_lock); 4307static DEFINE_SPINLOCK(resource_alignment_lock);
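The pci_ignore_hotplug() added here sets the flag on the device and propagates it to the parent bridge, so port-level hotplug drivers can skip the events that follow a deliberate power-down. A hedged sketch of a caller; the driver-side function is hypothetical and only the exported helper comes from the patch:

static void example_power_off_behind_hotplug_port(struct pci_dev *pdev)
{
	/* Marks both pdev and pdev->bus->self so pciehp/acpiphp ignore
	 * the resulting presence and link-down events. */
	pci_ignore_hotplug(pdev);

	/* ...platform-specific power-down of the device follows... */
}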
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9bd762c237ab..4ff0ff1c4088 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -146,6 +146,27 @@ static inline void pci_no_msi(void) { }
146static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } 146static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
147#endif 147#endif
148 148
149static inline void pci_msi_set_enable(struct pci_dev *dev, int enable)
150{
151 u16 control;
152
153 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
154 control &= ~PCI_MSI_FLAGS_ENABLE;
155 if (enable)
156 control |= PCI_MSI_FLAGS_ENABLE;
157 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
158}
159
160static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
161{
162 u16 ctrl;
163
164 pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
165 ctrl &= ~clear;
166 ctrl |= set;
167 pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
168}
169
149void pci_realloc_get_opt(char *); 170void pci_realloc_get_opt(char *);
150 171
151static inline int pci_no_d1d2(struct pci_dev *dev) 172static inline int pci_no_d1d2(struct pci_dev *dev)
@@ -216,17 +237,6 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
216 struct list_head *fail_head); 237 struct list_head *fail_head);
217bool pci_bus_clip_resource(struct pci_dev *dev, int idx); 238bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
218 239
219/**
220 * pci_ari_enabled - query ARI forwarding status
221 * @bus: the PCI bus
222 *
223 * Returns 1 if ARI forwarding is enabled, or 0 if not enabled;
224 */
225static inline int pci_ari_enabled(struct pci_bus *bus)
226{
227 return bus->self && bus->self->ari_enabled;
228}
229
230void pci_reassigndev_resource_alignment(struct pci_dev *dev); 240void pci_reassigndev_resource_alignment(struct pci_dev *dev);
231void pci_disable_bridge_window(struct pci_dev *dev); 241void pci_disable_bridge_window(struct pci_dev *dev);
232 242
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 5653ea94547f..9803e3d039fe 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -425,8 +425,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev)
425 425
426 if (driver && driver->reset_link) { 426 if (driver && driver->reset_link) {
427 status = driver->reset_link(udev); 427 status = driver->reset_link(udev);
428 } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM || 428 } else if (udev->has_secondary_link) {
429 pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) {
430 status = default_reset_link(udev); 429 status = default_reset_link(udev);
431 } else { 430 } else {
432 dev_printk(KERN_DEBUG, &dev->dev, 431 dev_printk(KERN_DEBUG, &dev->dev,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7d4fcdc512aa..317e3558a35e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -127,15 +127,12 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
127{ 127{
128 struct pci_dev *child; 128 struct pci_dev *child;
129 struct pci_bus *linkbus = link->pdev->subordinate; 129 struct pci_bus *linkbus = link->pdev->subordinate;
130 u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
130 131
131 list_for_each_entry(child, &linkbus->devices, bus_list) { 132 list_for_each_entry(child, &linkbus->devices, bus_list)
132 if (enable) 133 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
133 pcie_capability_set_word(child, PCI_EXP_LNKCTL, 134 PCI_EXP_LNKCTL_CLKREQ_EN,
134 PCI_EXP_LNKCTL_CLKREQ_EN); 135 val);
135 else
136 pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
137 PCI_EXP_LNKCTL_CLKREQ_EN);
138 }
139 link->clkpm_enabled = !!enable; 136 link->clkpm_enabled = !!enable;
140} 137}
141 138
@@ -525,7 +522,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
525 INIT_LIST_HEAD(&link->children); 522 INIT_LIST_HEAD(&link->children);
526 INIT_LIST_HEAD(&link->link); 523 INIT_LIST_HEAD(&link->link);
527 link->pdev = pdev; 524 link->pdev = pdev;
528 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) { 525 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
529 struct pcie_link_state *parent; 526 struct pcie_link_state *parent;
530 parent = pdev->bus->parent->self->link_state; 527 parent = pdev->bus->parent->self->link_state;
531 if (!parent) { 528 if (!parent) {
@@ -559,10 +556,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
559 if (!aspm_support_enabled) 556 if (!aspm_support_enabled)
560 return; 557 return;
561 558
562 if (!pci_is_pcie(pdev) || pdev->link_state) 559 if (pdev->link_state)
563 return; 560 return;
564 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && 561
565 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) 562 /*
563 * We allocate pcie_link_state for the component on the upstream
564 * end of a Link, so there's nothing to do unless this device has a
565 * Link on its secondary side.
566 */
567 if (!pdev->has_secondary_link)
566 return; 568 return;
567 569
568 /* VIA has a strange chipset, root port is under a bridge */ 570 /* VIA has a strange chipset, root port is under a bridge */
@@ -675,10 +677,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
675{ 677{
676 struct pcie_link_state *link = pdev->link_state; 678 struct pcie_link_state *link = pdev->link_state;
677 679
678 if (aspm_disabled || !pci_is_pcie(pdev) || !link) 680 if (aspm_disabled || !link)
679 return;
680 if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
681 (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
682 return; 681 return;
683 /* 682 /*
684 * Devices changed PM state, we should recheck if latency 683 * Devices changed PM state, we should recheck if latency
@@ -696,16 +695,12 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
696{ 695{
697 struct pcie_link_state *link = pdev->link_state; 696 struct pcie_link_state *link = pdev->link_state;
698 697
699 if (aspm_disabled || !pci_is_pcie(pdev) || !link) 698 if (aspm_disabled || !link)
700 return; 699 return;
701 700
702 if (aspm_policy != POLICY_POWERSAVE) 701 if (aspm_policy != POLICY_POWERSAVE)
703 return; 702 return;
704 703
705 if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
706 (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
707 return;
708
709 down_read(&pci_bus_sem); 704 down_read(&pci_bus_sem);
710 mutex_lock(&aspm_lock); 705 mutex_lock(&aspm_lock);
711 pcie_config_aspm_path(link); 706 pcie_config_aspm_path(link);
@@ -714,8 +709,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
714 up_read(&pci_bus_sem); 709 up_read(&pci_bus_sem);
715} 710}
716 711
717static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, 712static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
718 bool force)
719{ 713{
720 struct pci_dev *parent = pdev->bus->self; 714 struct pci_dev *parent = pdev->bus->self;
721 struct pcie_link_state *link; 715 struct pcie_link_state *link;
@@ -723,8 +717,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
723 if (!pci_is_pcie(pdev)) 717 if (!pci_is_pcie(pdev))
724 return; 718 return;
725 719
726 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || 720 if (pdev->has_secondary_link)
727 pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
728 parent = pdev; 721 parent = pdev;
729 if (!parent || !parent->link_state) 722 if (!parent || !parent->link_state)
730 return; 723 return;
@@ -737,7 +730,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
737 * a similar mechanism using "PciASPMOptOut", which is also 730 * a similar mechanism using "PciASPMOptOut", which is also
738 * ignored in this situation. 731 * ignored in this situation.
739 */ 732 */
740 if (aspm_disabled && !force) { 733 if (aspm_disabled) {
741 dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n"); 734 dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n");
742 return; 735 return;
743 } 736 }
@@ -763,7 +756,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
763 756
764void pci_disable_link_state_locked(struct pci_dev *pdev, int state) 757void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
765{ 758{
766 __pci_disable_link_state(pdev, state, false, false); 759 __pci_disable_link_state(pdev, state, false);
767} 760}
768EXPORT_SYMBOL(pci_disable_link_state_locked); 761EXPORT_SYMBOL(pci_disable_link_state_locked);
769 762
@@ -778,7 +771,7 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
778 */ 771 */
779void pci_disable_link_state(struct pci_dev *pdev, int state) 772void pci_disable_link_state(struct pci_dev *pdev, int state)
780{ 773{
781 __pci_disable_link_state(pdev, state, true, false); 774 __pci_disable_link_state(pdev, state, true);
782} 775}
783EXPORT_SYMBOL(pci_disable_link_state); 776EXPORT_SYMBOL(pci_disable_link_state);
784 777
@@ -907,9 +900,7 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
907{ 900{
908 struct pcie_link_state *link_state = pdev->link_state; 901 struct pcie_link_state *link_state = pdev->link_state;
909 902
910 if (!pci_is_pcie(pdev) || 903 if (!link_state)
911 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
912 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
913 return; 904 return;
914 905
915 if (link_state->aspm_support) 906 if (link_state->aspm_support)
@@ -924,9 +915,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
924{ 915{
925 struct pcie_link_state *link_state = pdev->link_state; 916 struct pcie_link_state *link_state = pdev->link_state;
926 917
927 if (!pci_is_pcie(pdev) || 918 if (!link_state)
928 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
929 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
930 return; 919 return;
931 920
932 if (link_state->aspm_support) 921 if (link_state->aspm_support)
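The pattern running through the AER and ASPM hunks above is that "is this the upstream end of a Link?" is no longer inferred from the port type but read from the new pdev->has_secondary_link bit that enumeration sets (see the probe.c hunk below). A rough before/after sketch with an illustrative callee:

	/* Before: guess from the port type. */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
		setup_link_state(pdev);

	/* After: ask the device itself. */
	if (pdev->has_secondary_link)
		setup_link_state(pdev);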
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6675a7a1b9fc..cefd636681b6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -254,8 +254,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
254 } 254 }
255 255
256 if (res->flags & IORESOURCE_MEM_64) { 256 if (res->flags & IORESOURCE_MEM_64) {
257 if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) && 257 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
258 sz64 > 0x100000000ULL) { 258 && sz64 > 0x100000000ULL) {
259 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; 259 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
260 res->start = 0; 260 res->start = 0;
261 res->end = 0; 261 res->end = 0;
@@ -264,7 +264,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
264 goto out; 264 goto out;
265 } 265 }
266 266
267 if ((sizeof(dma_addr_t) < 8) && l) { 267 if ((sizeof(pci_bus_addr_t) < 8) && l) {
268 /* Above 32-bit boundary; try to reallocate */ 268 /* Above 32-bit boundary; try to reallocate */
269 res->flags |= IORESOURCE_UNSET; 269 res->flags |= IORESOURCE_UNSET;
270 res->start = 0; 270 res->start = 0;
@@ -399,7 +399,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
399 struct pci_dev *dev = child->self; 399 struct pci_dev *dev = child->self;
400 u16 mem_base_lo, mem_limit_lo; 400 u16 mem_base_lo, mem_limit_lo;
401 u64 base64, limit64; 401 u64 base64, limit64;
402 dma_addr_t base, limit; 402 pci_bus_addr_t base, limit;
403 struct pci_bus_region region; 403 struct pci_bus_region region;
404 struct resource *res; 404 struct resource *res;
405 405
@@ -426,8 +426,8 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
426 } 426 }
427 } 427 }
428 428
429 base = (dma_addr_t) base64; 429 base = (pci_bus_addr_t) base64;
430 limit = (dma_addr_t) limit64; 430 limit = (pci_bus_addr_t) limit64;
431 431
432 if (base != base64) { 432 if (base != base64) {
433 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", 433 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
@@ -973,6 +973,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
973{ 973{
974 int pos; 974 int pos;
975 u16 reg16; 975 u16 reg16;
976 int type;
977 struct pci_dev *parent;
976 978
977 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 979 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
978 if (!pos) 980 if (!pos)
@@ -982,6 +984,22 @@ void set_pcie_port_type(struct pci_dev *pdev)
982 pdev->pcie_flags_reg = reg16; 984 pdev->pcie_flags_reg = reg16;
983 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); 985 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
984 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 986 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
987
988 /*
989 * A Root Port is always the upstream end of a Link. No PCIe
990 * component has two Links. Two Links are connected by a Switch
991 * that has a Port on each Link and internal logic to connect the
992 * two Ports.
993 */
994 type = pci_pcie_type(pdev);
995 if (type == PCI_EXP_TYPE_ROOT_PORT)
996 pdev->has_secondary_link = 1;
997 else if (type == PCI_EXP_TYPE_UPSTREAM ||
998 type == PCI_EXP_TYPE_DOWNSTREAM) {
999 parent = pci_upstream_bridge(pdev);
1000 if (!parent->has_secondary_link)
1001 pdev->has_secondary_link = 1;
1002 }
985} 1003}
986 1004
987void set_pcie_hotplug_bridge(struct pci_dev *pdev) 1005void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1085,6 +1103,22 @@ int pci_cfg_space_size(struct pci_dev *dev)
1085 1103
1086#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 1104#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1087 1105
1106static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1107{
1108 /*
1109 * Disable the MSI hardware to avoid screaming interrupts
1110 * during boot. This is the power on reset default so
1111 * usually this should be a noop.
1112 */
1113 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1114 if (dev->msi_cap)
1115 pci_msi_set_enable(dev, 0);
1116
1117 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1118 if (dev->msix_cap)
1119 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1120}
1121
1088/** 1122/**
1089 * pci_setup_device - fill in class and map information of a device 1123 * pci_setup_device - fill in class and map information of a device
1090 * @dev: the device structure to fill 1124 * @dev: the device structure to fill
@@ -1140,6 +1174,8 @@ int pci_setup_device(struct pci_dev *dev)
1140 /* "Unknown power state" */ 1174 /* "Unknown power state" */
1141 dev->current_state = PCI_UNKNOWN; 1175 dev->current_state = PCI_UNKNOWN;
1142 1176
1177 pci_msi_setup_pci_dev(dev);
1178
1143 /* Early fixups, before probing the BARs */ 1179 /* Early fixups, before probing the BARs */
1144 pci_fixup_device(pci_fixup_early, dev); 1180 pci_fixup_device(pci_fixup_early, dev);
1145 /* device class may be changed after fixup */ 1181 /* device class may be changed after fixup */
@@ -1611,7 +1647,7 @@ static int only_one_child(struct pci_bus *bus)
1611 return 0; 1647 return 0;
1612 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT) 1648 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1613 return 1; 1649 return 1;
1614 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM && 1650 if (parent->has_secondary_link &&
1615 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) 1651 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1616 return 1; 1652 return 1;
1617 return 0; 1653 return 0;
@@ -2094,25 +2130,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2094} 2130}
2095EXPORT_SYMBOL(pci_scan_root_bus); 2131EXPORT_SYMBOL(pci_scan_root_bus);
2096 2132
2097/* Deprecated; use pci_scan_root_bus() instead */
2098struct pci_bus *pci_scan_bus_parented(struct device *parent,
2099 int bus, struct pci_ops *ops, void *sysdata)
2100{
2101 LIST_HEAD(resources);
2102 struct pci_bus *b;
2103
2104 pci_add_resource(&resources, &ioport_resource);
2105 pci_add_resource(&resources, &iomem_resource);
2106 pci_add_resource(&resources, &busn_resource);
2107 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
2108 if (b)
2109 pci_scan_child_bus(b);
2110 else
2111 pci_free_resource_list(&resources);
2112 return b;
2113}
2114EXPORT_SYMBOL(pci_scan_bus_parented);
2115
2116struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, 2133struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2117 void *sysdata) 2134 void *sysdata)
2118{ 2135{
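
With pci_scan_bus_parented() removed, a former caller supplies its own resource list and calls pci_scan_root_bus() directly, as the xen-pcifront conversion later in this diff does. A hedged sketch of that pattern; my_scan_root() and bus_res are illustrative names:

    #include <linux/pci.h>
    #include <linux/ioport.h>

    static struct resource bus_res = {
        .start = 0,
        .end   = 255,
        .flags = IORESOURCE_BUS,
    };

    static struct pci_bus *my_scan_root(struct device *parent, int busnr,
                                        struct pci_ops *ops, void *sysdata)
    {
        LIST_HEAD(resources);
        struct pci_bus *b;

        /* hand the window and bus-number resources to the core explicitly */
        pci_add_resource(&resources, &ioport_resource);
        pci_add_resource(&resources, &iomem_resource);
        pci_add_resource(&resources, &bus_res);

        b = pci_scan_root_bus(parent, busnr, ops, sysdata, &resources);
        if (!b)
            pci_free_resource_list(&resources);    /* caller frees on failure */
        return b;
    }
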
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2890ad7cf7c6..e9fd0e90fa3b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1593,7 +1593,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a
1593 1593
1594static void quirk_pcie_mch(struct pci_dev *pdev) 1594static void quirk_pcie_mch(struct pci_dev *pdev)
1595{ 1595{
1596 pci_msi_off(pdev);
1597 pdev->no_msi = 1; 1596 pdev->no_msi = 1;
1598} 1597}
1599DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); 1598DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
@@ -1607,7 +1606,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
1607 */ 1606 */
1608static void quirk_pcie_pxh(struct pci_dev *dev) 1607static void quirk_pcie_pxh(struct pci_dev *dev)
1609{ 1608{
1610 pci_msi_off(dev);
1611 dev->no_msi = 1; 1609 dev->no_msi = 1;
1612 dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n"); 1610 dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
1613} 1611}
@@ -3565,6 +3563,8 @@ static void quirk_dma_func1_alias(struct pci_dev *dev)
3565 * SKUs this function is not present, making this a ghost requester. 3563 * SKUs this function is not present, making this a ghost requester.
3566 * https://bugzilla.kernel.org/show_bug.cgi?id=42679 3564 * https://bugzilla.kernel.org/show_bug.cgi?id=42679
3567 */ 3565 */
3566DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
3567 quirk_dma_func1_alias);
3568DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, 3568DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3569 quirk_dma_func1_alias); 3569 quirk_dma_func1_alias);
3570/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ 3570/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
@@ -3733,6 +3733,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
3733 /* Wellsburg (X99) PCH */ 3733 /* Wellsburg (X99) PCH */
3734 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17, 3734 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
3735 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e, 3735 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
3736 /* Lynx Point (9 series) PCH */
3737 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
3736}; 3738};
3737 3739
3738static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) 3740static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
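
For context on the Marvell 0x9120 entry added above, this is the general shape of registering a header fixup with DECLARE_PCI_FIXUP_HEADER(); the quirk body here is a placeholder, not the kernel's quirk_dma_func1_alias():

    #include <linux/pci.h>

    static void example_header_quirk(struct pci_dev *dev)
    {
        /* runs right after header probing for every matching vendor/device */
        dev_info(&dev->dev, "applying example header quirk\n");
    }
    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, example_header_quirk);
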
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index 7e1304d2e389..dfbab61a1b47 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -108,8 +108,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
108 struct pci_dev *link = NULL; 108 struct pci_dev *link = NULL;
109 109
110 /* Enable VCs from the downstream device */ 110 /* Enable VCs from the downstream device */
111 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || 111 if (!dev->has_secondary_link)
112 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
113 return; 112 return;
114 113
115 ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); 114 ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 7cfd2db02deb..240f38872085 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -446,9 +446,15 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
446 unsigned int domain, unsigned int bus) 446 unsigned int domain, unsigned int bus)
447{ 447{
448 struct pci_bus *b; 448 struct pci_bus *b;
449 LIST_HEAD(resources);
449 struct pcifront_sd *sd = NULL; 450 struct pcifront_sd *sd = NULL;
450 struct pci_bus_entry *bus_entry = NULL; 451 struct pci_bus_entry *bus_entry = NULL;
451 int err = 0; 452 int err = 0;
453 static struct resource busn_res = {
454 .start = 0,
455 .end = 255,
456 .flags = IORESOURCE_BUS,
457 };
452 458
453#ifndef CONFIG_PCI_DOMAINS 459#ifndef CONFIG_PCI_DOMAINS
454 if (domain != 0) { 460 if (domain != 0) {
@@ -470,17 +476,21 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
470 err = -ENOMEM; 476 err = -ENOMEM;
471 goto err_out; 477 goto err_out;
472 } 478 }
479 pci_add_resource(&resources, &ioport_resource);
480 pci_add_resource(&resources, &iomem_resource);
481 pci_add_resource(&resources, &busn_res);
473 pcifront_init_sd(sd, domain, bus, pdev); 482 pcifront_init_sd(sd, domain, bus, pdev);
474 483
475 pci_lock_rescan_remove(); 484 pci_lock_rescan_remove();
476 485
477 b = pci_scan_bus_parented(&pdev->xdev->dev, bus, 486 b = pci_scan_root_bus(&pdev->xdev->dev, bus,
478 &pcifront_bus_ops, sd); 487 &pcifront_bus_ops, sd, &resources);
479 if (!b) { 488 if (!b) {
480 dev_err(&pdev->xdev->dev, 489 dev_err(&pdev->xdev->dev,
481 "Error creating PCI Frontend Bus!\n"); 490 "Error creating PCI Frontend Bus!\n");
482 err = -ENOMEM; 491 err = -ENOMEM;
483 pci_unlock_rescan_remove(); 492 pci_unlock_rescan_remove();
493 pci_free_resource_list(&resources);
484 goto err_out; 494 goto err_out;
485 } 495 }
486 496
@@ -488,7 +498,7 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
488 498
489 list_add(&bus_entry->list, &pdev->root_buses); 499 list_add(&bus_entry->list, &pdev->root_buses);
490 500
491 /* pci_scan_bus_parented skips devices which do not have a have 501 /* pci_scan_root_bus skips devices which do not have a
492 * devfn==0. The pcifront_scan_bus enumerates all devfn. */ 502 * devfn==0. The pcifront_scan_bus enumerates all devfn. */
493 err = pcifront_scan_bus(pdev, domain, bus, b); 503 err = pcifront_scan_bus(pdev, domain, bus, b);
494 504
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index eba1b7ac7294..5447b8186332 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -502,9 +502,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
502 INIT_LIST_HEAD(&vp_dev->virtqueues); 502 INIT_LIST_HEAD(&vp_dev->virtqueues);
503 spin_lock_init(&vp_dev->lock); 503 spin_lock_init(&vp_dev->lock);
504 504
505 /* Disable MSI/MSIX to bring device to a known good state. */
506 pci_msi_off(pci_dev);
507
508 /* enable the device */ 505 /* enable the device */
509 rc = pci_enable_device(pci_dev); 506 rc = pci_enable_device(pci_dev);
510 if (rc) 507 if (rc)
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index e80a0495e5b0..f24bc519bf31 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -6,19 +6,6 @@
6#ifndef _ASM_GENERIC_PCI_H 6#ifndef _ASM_GENERIC_PCI_H
7#define _ASM_GENERIC_PCI_H 7#define _ASM_GENERIC_PCI_H
8 8
9static inline struct resource *
10pcibios_select_root(struct pci_dev *pdev, struct resource *res)
11{
12 struct resource *root = NULL;
13
14 if (res->flags & IORESOURCE_IO)
15 root = &ioport_resource;
16 if (res->flags & IORESOURCE_MEM)
17 root = &iomem_resource;
18
19 return root;
20}
21
22#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ 9#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
23static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 10static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
24{ 11{
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 353db8dc4c6e..8a0321a8fb59 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -355,6 +355,7 @@ struct pci_dev {
355 unsigned int broken_intx_masking:1; 355 unsigned int broken_intx_masking:1;
356 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 356 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
357 unsigned int irq_managed:1; 357 unsigned int irq_managed:1;
358 unsigned int has_secondary_link:1;
358 pci_dev_flags_t dev_flags; 359 pci_dev_flags_t dev_flags;
359 atomic_t enable_cnt; /* pci_enable_device has been called */ 360 atomic_t enable_cnt; /* pci_enable_device has been called */
360 361
@@ -577,9 +578,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
577int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, 578int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
578 int reg, int len, u32 val); 579 int reg, int len, u32 val);
579 580
581#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
582typedef u64 pci_bus_addr_t;
583#else
584typedef u32 pci_bus_addr_t;
585#endif
586
580struct pci_bus_region { 587struct pci_bus_region {
581 dma_addr_t start; 588 pci_bus_addr_t start;
582 dma_addr_t end; 589 pci_bus_addr_t end;
583}; 590};
584 591
585struct pci_dynids { 592struct pci_dynids {
@@ -773,8 +780,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
773void pcibios_scan_specific_bus(int busn); 780void pcibios_scan_specific_bus(int busn);
774struct pci_bus *pci_find_bus(int domain, int busnr); 781struct pci_bus *pci_find_bus(int domain, int busnr);
775void pci_bus_add_devices(const struct pci_bus *bus); 782void pci_bus_add_devices(const struct pci_bus *bus);
776struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
777 struct pci_ops *ops, void *sysdata);
778struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); 783struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
779struct pci_bus *pci_create_root_bus(struct device *parent, int bus, 784struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
780 struct pci_ops *ops, void *sysdata, 785 struct pci_ops *ops, void *sysdata,
@@ -974,7 +979,6 @@ void pci_intx(struct pci_dev *dev, int enable);
974bool pci_intx_mask_supported(struct pci_dev *dev); 979bool pci_intx_mask_supported(struct pci_dev *dev);
975bool pci_check_and_mask_intx(struct pci_dev *dev); 980bool pci_check_and_mask_intx(struct pci_dev *dev);
976bool pci_check_and_unmask_intx(struct pci_dev *dev); 981bool pci_check_and_unmask_intx(struct pci_dev *dev);
977void pci_msi_off(struct pci_dev *dev);
978int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); 982int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
979int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); 983int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
980int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); 984int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1006,6 +1010,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1006int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); 1010int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1007int pci_select_bars(struct pci_dev *dev, unsigned long flags); 1011int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1008bool pci_device_is_present(struct pci_dev *pdev); 1012bool pci_device_is_present(struct pci_dev *pdev);
1013void pci_ignore_hotplug(struct pci_dev *dev);
1009 1014
1010/* ROM control related routines */ 1015/* ROM control related routines */
1011int pci_enable_rom(struct pci_dev *pdev); 1016int pci_enable_rom(struct pci_dev *pdev);
@@ -1043,11 +1048,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
1043bool pci_check_pme_status(struct pci_dev *dev); 1048bool pci_check_pme_status(struct pci_dev *dev);
1044void pci_pme_wakeup_bus(struct pci_bus *bus); 1049void pci_pme_wakeup_bus(struct pci_bus *bus);
1045 1050
1046static inline void pci_ignore_hotplug(struct pci_dev *dev)
1047{
1048 dev->ignore_hotplug = 1;
1049}
1050
1051static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 1051static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1052 bool enable) 1052 bool enable)
1053{ 1053{
@@ -1128,7 +1128,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1128 1128
1129int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); 1129int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1130 1130
1131static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1131static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1132{ 1132{
1133 struct pci_bus_region region; 1133 struct pci_bus_region region;
1134 1134
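
pci_bus_address() now returns the new pci_bus_addr_t. A short, hypothetical usage sketch contrasting the bus-side BAR address with the CPU-side resource start; report_bar0() is an illustrative name:

    #include <linux/pci.h>

    static void report_bar0(struct pci_dev *pdev)
    {
        /* the address a peer device would use to reach BAR 0 */
        pci_bus_addr_t bus_addr = pci_bus_address(pdev, 0);
        /* the CPU physical address the kernel ioremaps */
        resource_size_t cpu_addr = pci_resource_start(pdev, 0);

        dev_info(&pdev->dev, "BAR0 bus %#llx cpu %#llx\n",
                 (unsigned long long)bus_addr,
                 (unsigned long long)cpu_addr);
    }
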
@@ -1197,15 +1197,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1197#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) 1197#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1198#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 1198#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1199 1199
1200enum pci_dma_burst_strategy {
1201 PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
1202 strategy_parameter is N/A */
1203 PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
1204 byte boundaries */
1205 PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
1206 strategy_parameter byte boundaries */
1207};
1208
1209struct msix_entry { 1200struct msix_entry {
1210 u32 vector; /* kernel uses to write allocated vector */ 1201 u32 vector; /* kernel uses to write allocated vector */
1211 u16 entry; /* driver uses to specify entry, OS writes */ 1202 u16 entry; /* driver uses to specify entry, OS writes */
@@ -1430,8 +1421,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1430{ return -EIO; } 1421{ return -EIO; }
1431static inline void pci_release_regions(struct pci_dev *dev) { } 1422static inline void pci_release_regions(struct pci_dev *dev) { }
1432 1423
1433#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
1434
1435static inline void pci_block_cfg_access(struct pci_dev *dev) { } 1424static inline void pci_block_cfg_access(struct pci_dev *dev) { }
1436static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) 1425static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
1437{ return 0; } 1426{ return 0; }
@@ -1905,4 +1894,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
1905{ 1894{
1906 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; 1895 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
1907} 1896}
1897
1898/**
1899 * pci_ari_enabled - query ARI forwarding status
1900 * @bus: the PCI bus
1901 *
1902 * Returns true if ARI forwarding is enabled.
1903 */
1904static inline bool pci_ari_enabled(struct pci_bus *bus)
1905{
1906 return bus->self && bus->self->ari_enabled;
1907}
1908#endif /* LINUX_PCI_H */ 1908#endif /* LINUX_PCI_H */
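
A brief, hypothetical example of calling the new pci_ari_enabled() helper: when ARI forwarding is enabled on the bus above a device, the whole 8-bit devfn is a function number rather than a device/function pair. describe_function() is an illustrative name:

    #include <linux/pci.h>

    static void describe_function(struct pci_dev *pdev)
    {
        if (pci_ari_enabled(pdev->bus))
            /* ARI: device number is implicitly 0, devfn is one 8-bit function number */
            dev_info(&pdev->dev, "ARI function %u\n", pdev->devfn);
        else
            dev_info(&pdev->dev, "device %u function %u\n",
                     PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
    }
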
diff --git a/include/linux/types.h b/include/linux/types.h
index 59698be03490..8715287c3b1f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -139,12 +139,20 @@ typedef unsigned long blkcnt_t;
139 */ 139 */
140#define pgoff_t unsigned long 140#define pgoff_t unsigned long
141 141
142/* A dma_addr_t can hold any valid DMA or bus address for the platform */ 142/*
143 * A dma_addr_t can hold any valid DMA address, i.e., any address returned
144 * by the DMA API.
145 *
146 * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
147 * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
148 * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
149 * so they don't care about the size of the actual bus addresses.
150 */
143#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 151#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
144typedef u64 dma_addr_t; 152typedef u64 dma_addr_t;
145#else 153#else
146typedef u32 dma_addr_t; 154typedef u32 dma_addr_t;
147#endif /* dma_addr_t */ 155#endif
148 156
149typedef unsigned __bitwise__ gfp_t; 157typedef unsigned __bitwise__ gfp_t;
150typedef unsigned __bitwise__ fmode_t; 158typedef unsigned __bitwise__ fmode_t;
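
To illustrate the distinction the new comment draws, a hedged driver-side sketch in which a mapped buffer's DMA handle is a dma_addr_t while a peer-visible BAR address is a pci_bus_addr_t; map_and_report() is an illustrative name and error handling is minimal:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static void map_and_report(struct pci_dev *pdev, void *buf, size_t len)
    {
        /* dma_addr_t: an address returned by the DMA API */
        dma_addr_t handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        /* pci_bus_addr_t: a bus address such as a BAR, possibly wider */
        pci_bus_addr_t bar0 = pci_bus_address(pdev, 0);

        if (dma_mapping_error(&pdev->dev, handle))
            return;

        dev_info(&pdev->dev, "dma handle %#llx, BAR0 bus address %#llx\n",
                 (unsigned long long)handle,
                 (unsigned long long)bar0);
        dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
    }
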