aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt6
-rw-r--r--Documentation/PCI/PCIEBUS-HOWTO.txt33
-rw-r--r--Documentation/PCI/pci-error-recovery.txt24
-rw-r--r--Documentation/PCI/pci.txt24
-rw-r--r--Documentation/devicetree/bindings/pci/hisilicon-pcie.txt37
-rw-r--r--Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt29
-rw-r--r--Documentation/devicetree/bindings/phy/samsung-phy.txt17
-rw-r--r--MAINTAINERS22
-rw-r--r--arch/x86/kernel/apic/msi.c2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/pci_mcfg.c5
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h2
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c7
-rw-r--r--drivers/misc/genwqe/card_base.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c128
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h8
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/access.c5
-rw-r--r--drivers/pci/dwc/Kconfig132
-rw-r--r--drivers/pci/dwc/Makefile24
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c (renamed from drivers/pci/host/pci-dra7xx.c)247
-rw-r--r--drivers/pci/dwc/pci-exynos.c751
-rw-r--r--drivers/pci/dwc/pci-imx6.c (renamed from drivers/pci/host/pci-imx6.c)162
-rw-r--r--drivers/pci/dwc/pci-keystone-dw.c (renamed from drivers/pci/host/pci-keystone-dw.c)87
-rw-r--r--drivers/pci/dwc/pci-keystone.c (renamed from drivers/pci/host/pci-keystone.c)56
-rw-r--r--drivers/pci/dwc/pci-keystone.h (renamed from drivers/pci/host/pci-keystone.h)4
-rw-r--r--drivers/pci/dwc/pci-layerscape.c (renamed from drivers/pci/host/pci-layerscape.c)93
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c (renamed from drivers/pci/host/pcie-armada8k.c)87
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c (renamed from drivers/pci/host/pcie-artpec6.c)50
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c (renamed from drivers/pci/host/pcie-designware.c)445
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c (renamed from drivers/pci/host/pcie-designware-plat.c)29
-rw-r--r--drivers/pci/dwc/pcie-designware.c233
-rw-r--r--drivers/pci/dwc/pcie-designware.h198
-rw-r--r--drivers/pci/dwc/pcie-hisi.c (renamed from drivers/pci/host/pcie-hisi.c)139
-rw-r--r--drivers/pci/dwc/pcie-qcom.c (renamed from drivers/pci/host/pcie-qcom.c)87
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c (renamed from drivers/pci/host/pcie-spear13xx.c)85
-rw-r--r--drivers/pci/host/Kconfig113
-rw-r--r--drivers/pci/host/Makefile12
-rw-r--r--drivers/pci/host/pci-exynos.c629
-rw-r--r--drivers/pci/host/pci-host-common.c2
-rw-r--r--drivers/pci/host/pci-hyperv.c20
-rw-r--r--drivers/pci/host/pcie-altera.c2
-rw-r--r--drivers/pci/host/pcie-designware.h86
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c7
-rw-r--r--drivers/pci/host/pcie-iproc.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/msi.c120
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-sysfs.c23
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/Kconfig8
-rw-r--r--drivers/pci/pcie/aspm.c291
-rw-r--r--drivers/pci/pcie/pcie-dpc.c34
-rw-r--r--drivers/pci/pcie/portdrv_core.c161
-rw-r--r--drivers/pci/probe.c33
-rw-r--r--drivers/pci/quirks.c50
-rw-r--r--drivers/pci/setup-bus.c11
-rw-r--r--drivers/phy/Kconfig8
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-exynos-pcie.c285
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/pci.h19
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/uapi/linux/pci_regs.h17
69 files changed, 3084 insertions, 2167 deletions
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index cd9c9f6a7cd9..1e37138027a3 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -162,8 +162,6 @@ The following old APIs to enable and disable MSI or MSI-X interrupts should
162not be used in new code: 162not be used in new code:
163 163
164 pci_enable_msi() /* deprecated */ 164 pci_enable_msi() /* deprecated */
165 pci_enable_msi_range() /* deprecated */
166 pci_enable_msi_exact() /* deprecated */
167 pci_disable_msi() /* deprecated */ 165 pci_disable_msi() /* deprecated */
168 pci_enable_msix_range() /* deprecated */ 166 pci_enable_msix_range() /* deprecated */
169 pci_enable_msix_exact() /* deprecated */ 167 pci_enable_msix_exact() /* deprecated */
@@ -268,5 +266,5 @@ or disabled (0). If 0 is found in any of the msi_bus files belonging
268to bridges between the PCI root and the device, MSIs are disabled. 266to bridges between the PCI root and the device, MSIs are disabled.
269 267
270It is also worth checking the device driver to see whether it supports MSIs. 268It is also worth checking the device driver to see whether it supports MSIs.
271For example, it may contain calls to pci_enable_msi_range() or 269For example, it may contain calls to pci_irq_alloc_vectors() with the
272pci_enable_msix_range(). 270PCI_IRQ_MSI or PCI_IRQ_MSIX flags.
diff --git a/Documentation/PCI/PCIEBUS-HOWTO.txt b/Documentation/PCI/PCIEBUS-HOWTO.txt
index 6bd5f372adec..15f0bb3b5045 100644
--- a/Documentation/PCI/PCIEBUS-HOWTO.txt
+++ b/Documentation/PCI/PCIEBUS-HOWTO.txt
@@ -161,21 +161,13 @@ Since all service drivers of a PCI-PCI Bridge Port device are
161allowed to run simultaneously, below lists a few of possible resource 161allowed to run simultaneously, below lists a few of possible resource
162conflicts with proposed solutions. 162conflicts with proposed solutions.
163 163
1646.1 MSI Vector Resource 1646.1 MSI and MSI-X Vector Resource
165 165
166The MSI capability structure enables a device software driver to call 166Once MSI or MSI-X interrupts are enabled on a device, it stays in this
167pci_enable_msi to request MSI based interrupts. Once MSI interrupts 167mode until they are disabled again. Since service drivers of the same
168are enabled on a device, it stays in this mode until a device driver 168PCI-PCI Bridge port share the same physical device, if an individual
169calls pci_disable_msi to disable MSI interrupts and revert back to 169service driver enables or disables MSI/MSI-X mode it may result
170INTx emulation mode. Since service drivers of the same PCI-PCI Bridge 170unpredictable behavior.
171port share the same physical device, if an individual service driver
172calls pci_enable_msi/pci_disable_msi it may result unpredictable
173behavior. For example, two service drivers run simultaneously on the
174same physical Root Port. Both service drivers call pci_enable_msi to
175request MSI based interrupts. A service driver may not know whether
176any other service drivers have run on this Root Port. If either one
177of them calls pci_disable_msi, it puts the other service driver
178in a wrong interrupt mode.
179 171
180To avoid this situation all service drivers are not permitted to 172To avoid this situation all service drivers are not permitted to
181switch interrupt mode on its device. The PCI Express Port Bus driver 173switch interrupt mode on its device. The PCI Express Port Bus driver
@@ -187,17 +179,6 @@ driver. Service drivers should use (struct pcie_device*)dev->irq to
187call request_irq/free_irq. In addition, the interrupt mode is stored 179call request_irq/free_irq. In addition, the interrupt mode is stored
188in the field interrupt_mode of struct pcie_device. 180in the field interrupt_mode of struct pcie_device.
189 181
1906.2 MSI-X Vector Resources
191
192Similar to the MSI a device driver for an MSI-X capable device can
193call pci_enable_msix to request MSI-X interrupts. All service drivers
194are not permitted to switch interrupt mode on its device. The PCI
195Express Port Bus driver is responsible for determining the interrupt
196mode and this should be transparent to service drivers. Any attempt
197by service driver to call pci_enable_msix/pci_disable_msix may
198result unpredictable behavior. Service drivers should use
199(struct pcie_device*)dev->irq and call request_irq/free_irq.
200
2016.3 PCI Memory/IO Mapped Regions 1826.3 PCI Memory/IO Mapped Regions
202 183
203Service drivers for PCI Express Power Management (PME), Advanced 184Service drivers for PCI Express Power Management (PME), Advanced
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index ac26869c7db4..da3b2176d5da 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -78,7 +78,6 @@ struct pci_error_handlers
78{ 78{
79 int (*error_detected)(struct pci_dev *dev, enum pci_channel_state); 79 int (*error_detected)(struct pci_dev *dev, enum pci_channel_state);
80 int (*mmio_enabled)(struct pci_dev *dev); 80 int (*mmio_enabled)(struct pci_dev *dev);
81 int (*link_reset)(struct pci_dev *dev);
82 int (*slot_reset)(struct pci_dev *dev); 81 int (*slot_reset)(struct pci_dev *dev);
83 void (*resume)(struct pci_dev *dev); 82 void (*resume)(struct pci_dev *dev);
84}; 83};
@@ -104,8 +103,7 @@ if it implements any, it must implement error_detected(). If a callback
104is not implemented, the corresponding feature is considered unsupported. 103is not implemented, the corresponding feature is considered unsupported.
105For example, if mmio_enabled() and resume() aren't there, then it 104For example, if mmio_enabled() and resume() aren't there, then it
106is assumed that the driver is not doing any direct recovery and requires 105is assumed that the driver is not doing any direct recovery and requires
107a slot reset. If link_reset() is not implemented, the card is assumed to 106a slot reset. Typically a driver will want to know about
108not care about link resets. Typically a driver will want to know about
109a slot_reset(). 107a slot_reset().
110 108
111The actual steps taken by a platform to recover from a PCI error 109The actual steps taken by a platform to recover from a PCI error
@@ -232,25 +230,9 @@ proceeds to STEP 4 (Slot Reset)
232 230
233STEP 3: Link Reset 231STEP 3: Link Reset
234------------------ 232------------------
235The platform resets the link, and then calls the link_reset() callback 233The platform resets the link. This is a PCI-Express specific step
236on all affected device drivers. This is a PCI-Express specific state
237and is done whenever a non-fatal error has been detected that can be 234and is done whenever a non-fatal error has been detected that can be
238"solved" by resetting the link. This call informs the driver of the 235"solved" by resetting the link.
239reset and the driver should check to see if the device appears to be
240in working condition.
241
242The driver is not supposed to restart normal driver I/O operations
243at this point. It should limit itself to "probing" the device to
244check its recoverability status. If all is right, then the platform
245will call resume() once all drivers have ack'd link_reset().
246
247 Result codes:
248 (identical to STEP 3 (MMIO Enabled)
249
250The platform then proceeds to either STEP 4 (Slot Reset) or STEP 5
251(Resume Operations).
252
253>>> The current powerpc implementation does not implement this callback.
254 236
255STEP 4: Slot Reset 237STEP 4: Slot Reset
256------------------ 238------------------
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 77f49dc5be23..611a75e4366e 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -382,18 +382,18 @@ The fundamental difference between MSI and MSI-X is how multiple
382"vectors" get allocated. MSI requires contiguous blocks of vectors 382"vectors" get allocated. MSI requires contiguous blocks of vectors
383while MSI-X can allocate several individual ones. 383while MSI-X can allocate several individual ones.
384 384
385MSI capability can be enabled by calling pci_enable_msi() or 385MSI capability can be enabled by calling pci_alloc_irq_vectors() with the
386pci_enable_msix() before calling request_irq(). This causes 386PCI_IRQ_MSI and/or PCI_IRQ_MSIX flags before calling request_irq(). This
387the PCI support to program CPU vector data into the PCI device 387causes the PCI support to program CPU vector data into the PCI device
388capability registers. 388capability registers. Many architectures, chip-sets, or BIOSes do NOT
389 389support MSI or MSI-X and a call to pci_alloc_irq_vectors with just
390If your PCI device supports both, try to enable MSI-X first. 390the PCI_IRQ_MSI and PCI_IRQ_MSIX flags will fail, so try to always
391Only one can be enabled at a time. Many architectures, chip-sets, 391specify PCI_IRQ_LEGACY as well.
392or BIOSes do NOT support MSI or MSI-X and the call to pci_enable_msi/msix 392
393will fail. This is important to note since many drivers have 393Drivers that have different interrupt handlers for MSI/MSI-X and
394two (or more) interrupt handlers: one for MSI/MSI-X and another for IRQs. 394legacy INTx should chose the right one based on the msi_enabled
395They choose which handler to register with request_irq() based on the 395and msix_enabled flags in the pci_dev structure after calling
396return value from pci_enable_msi/msix(). 396pci_alloc_irq_vectors.
397 397
398There are (at least) two really good reasons for using MSI: 398There are (at least) two really good reasons for using MSI:
3991) MSI is an exclusive interrupt vector by definition. 3991) MSI is an exclusive interrupt vector by definition.
diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
index 59c2f47aa303..b7fa3b97986d 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@@ -42,3 +42,40 @@ Hip05 Example (note that Hip06 is the same except compatible):
42 0x0 0 0 4 &mbigen_pcie 4 13>; 42 0x0 0 0 4 &mbigen_pcie 4 13>;
43 status = "ok"; 43 status = "ok";
44 }; 44 };
45
46HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
47The properties and their meanings are identical to those described in
48host-generic-pci.txt except as listed below.
49
50Properties of the host controller node that differ from
51host-generic-pci.txt:
52
53- compatible : Must be "hisilicon,pcie-almost-ecam"
54
55- reg : Two entries: First the ECAM configuration space for any
56 other bus underneath the root bus. Second, the base
57 and size of the HiSilicon host bridge registers include
58 the RC's own config space.
59
60Example:
61 pcie0: pcie@a0090000 {
62 compatible = "hisilicon,pcie-almost-ecam";
63 reg = <0 0xb0000000 0 0x2000000>, /* ECAM configuration space */
64 <0 0xa0090000 0 0x10000>; /* host bridge registers */
65 bus-range = <0 31>;
66 msi-map = <0x0000 &its_dsa 0x0000 0x2000>;
67 msi-map-mask = <0xffff>;
68 #address-cells = <3>;
69 #size-cells = <2>;
70 device_type = "pci";
71 dma-coherent;
72 ranges = <0x02000000 0 0xb2000000 0x0 0xb2000000 0 0x5ff0000
73 0x01000000 0 0 0 0xb7ff0000 0 0x10000>;
74 #interrupt-cells = <1>;
75 interrupt-map-mask = <0xf800 0 0 7>;
76 interrupt-map = <0x0 0 0 1 &mbigen_pcie0 650 4
77 0x0 0 0 2 &mbigen_pcie0 650 4
78 0x0 0 0 3 &mbigen_pcie0 650 4
79 0x0 0 0 4 &mbigen_pcie0 650 4>;
80 status = "ok";
81 };
diff --git a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt
index 4f9d23d2ed67..7d3b09474657 100644
--- a/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/samsung,exynos5440-pcie.txt
@@ -7,8 +7,19 @@ Required properties:
7- compatible: "samsung,exynos5440-pcie" 7- compatible: "samsung,exynos5440-pcie"
8- reg: base addresses and lengths of the pcie controller, 8- reg: base addresses and lengths of the pcie controller,
9 the phy controller, additional register for the phy controller. 9 the phy controller, additional register for the phy controller.
10 (Registers for the phy controller are DEPRECATED.
11 Use the PHY framework.)
12- reg-names : First name should be set to "elbi".
13 And use the "config" instead of getting the confgiruation address space
14 from "ranges".
15 NOTE: When use the "config" property, reg-names must be set.
10- interrupts: A list of interrupt outputs for level interrupt, 16- interrupts: A list of interrupt outputs for level interrupt,
11 pulse interrupt, special interrupt. 17 pulse interrupt, special interrupt.
18- phys: From PHY binding. Phandle for the Generic PHY.
19 Refer to Documentation/devicetree/bindings/phy/samsung-phy.txt
20
21Other common properties refer to
22 Documentation/devicetree/binding/pci/designware-pcie.txt
12 23
13Example: 24Example:
14 25
@@ -54,6 +65,24 @@ SoC specific DT Entry:
54 num-lanes = <4>; 65 num-lanes = <4>;
55 }; 66 };
56 67
68With using PHY framework:
69 pcie_phy0: pcie-phy@270000 {
70 ...
71 reg = <0x270000 0x1000>, <0x271000 0x40>;
72 reg-names = "phy", "block";
73 ...
74 };
75
76 pcie@290000 {
77 ...
78 reg = <0x290000 0x1000>, <0x40000000 0x1000>;
79 reg-names = "elbi", "config";
80 phys = <&pcie_phy0>;
81 ranges = <0x81000000 0 0 0x60001000 0 0x00010000
82 0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>;
83 ...
84 };
85
57Board specific DT Entry: 86Board specific DT Entry:
58 87
59 pcie@290000 { 88 pcie@290000 {
diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt
index 9872ba8546bd..ab80bfe31cb3 100644
--- a/Documentation/devicetree/bindings/phy/samsung-phy.txt
+++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt
@@ -191,3 +191,20 @@ Example:
191 usbdrdphy0 = &usb3_phy0; 191 usbdrdphy0 = &usb3_phy0;
192 usbdrdphy1 = &usb3_phy1; 192 usbdrdphy1 = &usb3_phy1;
193 }; 193 };
194
195Samsung Exynos SoC series PCIe PHY controller
196--------------------------------------------------
197Required properties:
198- compatible : Should be set to "samsung,exynos5440-pcie-phy"
199- #phy-cells : Must be zero
200- reg : a register used by phy driver.
201 - First is for phy register, second is for block register.
202- reg-names : Must be set to "phy" and "block".
203
204Example:
205 pcie_phy0: pcie-phy@270000 {
206 #phy-cells = <0>;
207 compatible = "samsung,exynos5440-pcie-phy";
208 reg = <0x270000 0x1000>, <0x271000 0x40>;
209 reg-names = "phy", "block";
210 };
diff --git a/MAINTAINERS b/MAINTAINERS
index cfff2c9e3d94..8672f18c8ab9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9429,7 +9429,7 @@ L: linux-pci@vger.kernel.org
9429L: linux-arm-kernel@lists.infradead.org 9429L: linux-arm-kernel@lists.infradead.org
9430S: Maintained 9430S: Maintained
9431F: Documentation/devicetree/bindings/pci/pci-armada8k.txt 9431F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
9432F: drivers/pci/host/pcie-armada8k.c 9432F: drivers/pci/dwc/pcie-armada8k.c
9433 9433
9434PCI DRIVER FOR APPLIEDMICRO XGENE 9434PCI DRIVER FOR APPLIEDMICRO XGENE
9435M: Tanmay Inamdar <tinamdar@apm.com> 9435M: Tanmay Inamdar <tinamdar@apm.com>
@@ -9447,7 +9447,7 @@ L: linuxppc-dev@lists.ozlabs.org
9447L: linux-pci@vger.kernel.org 9447L: linux-pci@vger.kernel.org
9448L: linux-arm-kernel@lists.infradead.org 9448L: linux-arm-kernel@lists.infradead.org
9449S: Maintained 9449S: Maintained
9450F: drivers/pci/host/*layerscape* 9450F: drivers/pci/dwc/*layerscape*
9451 9451
9452PCI DRIVER FOR IMX6 9452PCI DRIVER FOR IMX6
9453M: Richard Zhu <hongxing.zhu@nxp.com> 9453M: Richard Zhu <hongxing.zhu@nxp.com>
@@ -9456,14 +9456,14 @@ L: linux-pci@vger.kernel.org
9456L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9456L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9457S: Maintained 9457S: Maintained
9458F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt 9458F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
9459F: drivers/pci/host/*imx6* 9459F: drivers/pci/dwc/*imx6*
9460 9460
9461PCI DRIVER FOR TI KEYSTONE 9461PCI DRIVER FOR TI KEYSTONE
9462M: Murali Karicheri <m-karicheri2@ti.com> 9462M: Murali Karicheri <m-karicheri2@ti.com>
9463L: linux-pci@vger.kernel.org 9463L: linux-pci@vger.kernel.org
9464L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9464L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9465S: Maintained 9465S: Maintained
9466F: drivers/pci/host/*keystone* 9466F: drivers/pci/dwc/*keystone*
9467 9467
9468PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) 9468PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
9469M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 9469M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
@@ -9495,7 +9495,7 @@ L: linux-omap@vger.kernel.org
9495L: linux-pci@vger.kernel.org 9495L: linux-pci@vger.kernel.org
9496S: Supported 9496S: Supported
9497F: Documentation/devicetree/bindings/pci/ti-pci.txt 9497F: Documentation/devicetree/bindings/pci/ti-pci.txt
9498F: drivers/pci/host/pci-dra7xx.c 9498F: drivers/pci/dwc/pci-dra7xx.c
9499 9499
9500PCI DRIVER FOR RENESAS R-CAR 9500PCI DRIVER FOR RENESAS R-CAR
9501M: Simon Horman <horms@verge.net.au> 9501M: Simon Horman <horms@verge.net.au>
@@ -9510,7 +9510,7 @@ L: linux-pci@vger.kernel.org
9510L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9510L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9511L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 9511L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
9512S: Maintained 9512S: Maintained
9513F: drivers/pci/host/pci-exynos.c 9513F: drivers/pci/dwc/pci-exynos.c
9514 9514
9515PCI DRIVER FOR SYNOPSIS DESIGNWARE 9515PCI DRIVER FOR SYNOPSIS DESIGNWARE
9516M: Jingoo Han <jingoohan1@gmail.com> 9516M: Jingoo Han <jingoohan1@gmail.com>
@@ -9518,7 +9518,7 @@ M: Joao Pinto <Joao.Pinto@synopsys.com>
9518L: linux-pci@vger.kernel.org 9518L: linux-pci@vger.kernel.org
9519S: Maintained 9519S: Maintained
9520F: Documentation/devicetree/bindings/pci/designware-pcie.txt 9520F: Documentation/devicetree/bindings/pci/designware-pcie.txt
9521F: drivers/pci/host/*designware* 9521F: drivers/pci/dwc/*designware*
9522 9522
9523PCI DRIVER FOR GENERIC OF HOSTS 9523PCI DRIVER FOR GENERIC OF HOSTS
9524M: Will Deacon <will.deacon@arm.com> 9524M: Will Deacon <will.deacon@arm.com>
@@ -9539,7 +9539,7 @@ PCIE DRIVER FOR ST SPEAR13XX
9539M: Pratyush Anand <pratyush.anand@gmail.com> 9539M: Pratyush Anand <pratyush.anand@gmail.com>
9540L: linux-pci@vger.kernel.org 9540L: linux-pci@vger.kernel.org
9541S: Maintained 9541S: Maintained
9542F: drivers/pci/host/*spear* 9542F: drivers/pci/dwc/*spear*
9543 9543
9544PCI MSI DRIVER FOR ALTERA MSI IP 9544PCI MSI DRIVER FOR ALTERA MSI IP
9545M: Ley Foon Tan <lftan@altera.com> 9545M: Ley Foon Tan <lftan@altera.com>
@@ -9564,7 +9564,7 @@ L: linux-arm-kernel@axis.com
9564L: linux-pci@vger.kernel.org 9564L: linux-pci@vger.kernel.org
9565S: Maintained 9565S: Maintained
9566F: Documentation/devicetree/bindings/pci/axis,artpec* 9566F: Documentation/devicetree/bindings/pci/axis,artpec*
9567F: drivers/pci/host/*artpec* 9567F: drivers/pci/dwc/*artpec*
9568 9568
9569PCIE DRIVER FOR HISILICON 9569PCIE DRIVER FOR HISILICON
9570M: Zhou Wang <wangzhou1@hisilicon.com> 9570M: Zhou Wang <wangzhou1@hisilicon.com>
@@ -9572,7 +9572,7 @@ M: Gabriele Paoloni <gabriele.paoloni@huawei.com>
9572L: linux-pci@vger.kernel.org 9572L: linux-pci@vger.kernel.org
9573S: Maintained 9573S: Maintained
9574F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt 9574F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
9575F: drivers/pci/host/pcie-hisi.c 9575F: drivers/pci/dwc/pcie-hisi.c
9576 9576
9577PCIE DRIVER FOR ROCKCHIP 9577PCIE DRIVER FOR ROCKCHIP
9578M: Shawn Lin <shawn.lin@rock-chips.com> 9578M: Shawn Lin <shawn.lin@rock-chips.com>
@@ -9588,7 +9588,7 @@ M: Stanimir Varbanov <svarbanov@mm-sol.com>
9588L: linux-pci@vger.kernel.org 9588L: linux-pci@vger.kernel.org
9589L: linux-arm-msm@vger.kernel.org 9589L: linux-arm-msm@vger.kernel.org
9590S: Maintained 9590S: Maintained
9591F: drivers/pci/host/*qcom* 9591F: drivers/pci/dwc/*qcom*
9592 9592
9593PCIE DRIVER FOR CAVIUM THUNDERX 9593PCIE DRIVER FOR CAVIUM THUNDERX
9594M: David Daney <david.daney@cavium.com> 9594M: David Daney <david.daney@cavium.com>
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 015bbf30e3e3..c61aec7e65f4 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -82,7 +82,7 @@ int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
82 if (domain == NULL) 82 if (domain == NULL)
83 return -ENOSYS; 83 return -ENOSYS;
84 84
85 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); 85 return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
86} 86}
87 87
88void native_teardown_msi_irq(unsigned int irq) 88void native_teardown_msi_irq(unsigned int irq)
diff --git a/drivers/Makefile b/drivers/Makefile
index 060026a02f59..f521cb0e58d6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -15,6 +15,9 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
15obj-$(CONFIG_GPIOLIB) += gpio/ 15obj-$(CONFIG_GPIOLIB) += gpio/
16obj-y += pwm/ 16obj-y += pwm/
17obj-$(CONFIG_PCI) += pci/ 17obj-$(CONFIG_PCI) += pci/
18# PCI dwc controller drivers
19obj-y += pci/dwc/
20
18obj-$(CONFIG_PARISC) += parisc/ 21obj-$(CONFIG_PARISC) += parisc/
19obj-$(CONFIG_RAPIDIO) += rapidio/ 22obj-$(CONFIG_RAPIDIO) += rapidio/
20obj-y += video/ 23obj-y += video/
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index a6a4ceaa6cc3..2944353253ed 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -195,11 +195,10 @@ int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
195 goto skip_lookup; 195 goto skip_lookup;
196 196
197 /* 197 /*
198 * We expect exact match, unless MCFG entry end bus covers more than 198 * We expect the range in bus_res in the coverage of MCFG bus range.
199 * specified by caller.
200 */ 199 */
201 list_for_each_entry(e, &pci_mcfg_list, list) { 200 list_for_each_entry(e, &pci_mcfg_list, list) {
202 if (e->segment == seg && e->bus_start == bus_res->start && 201 if (e->segment == seg && e->bus_start <= bus_res->start &&
203 e->bus_end >= bus_res->end) { 202 e->bus_end >= bus_res->end) {
204 root->mcfg_addr = e->addr; 203 root->mcfg_addr = e->addr;
205 } 204 }
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 4ac8f330c5cb..ebd941fc8a92 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -598,15 +598,6 @@ pci_slot_reset(struct pci_dev *pdev)
598 return PCI_ERS_RESULT_CAN_RECOVER; 598 return PCI_ERS_RESULT_CAN_RECOVER;
599} 599}
600 600
601static pci_ers_result_t
602pci_link_reset(struct pci_dev *pdev)
603{
604 struct hfi1_devdata *dd = pci_get_drvdata(pdev);
605
606 dd_dev_info(dd, "HFI1 link_reset function called, ignored\n");
607 return PCI_ERS_RESULT_CAN_RECOVER;
608}
609
610static void 601static void
611pci_resume(struct pci_dev *pdev) 602pci_resume(struct pci_dev *pdev)
612{ 603{
@@ -625,7 +616,6 @@ pci_resume(struct pci_dev *pdev)
625const struct pci_error_handlers hfi1_pci_err_handler = { 616const struct pci_error_handlers hfi1_pci_err_handler = {
626 .error_detected = pci_error_detected, 617 .error_detected = pci_error_detected,
627 .mmio_enabled = pci_mmio_enabled, 618 .mmio_enabled = pci_mmio_enabled,
628 .link_reset = pci_link_reset,
629 .slot_reset = pci_slot_reset, 619 .slot_reset = pci_slot_reset,
630 .resume = pci_resume, 620 .resume = pci_resume,
631}; 621};
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 6abe1c621aa4..c379b8342a09 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -682,13 +682,6 @@ qib_pci_slot_reset(struct pci_dev *pdev)
682 return PCI_ERS_RESULT_CAN_RECOVER; 682 return PCI_ERS_RESULT_CAN_RECOVER;
683} 683}
684 684
685static pci_ers_result_t
686qib_pci_link_reset(struct pci_dev *pdev)
687{
688 qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
689 return PCI_ERS_RESULT_CAN_RECOVER;
690}
691
692static void 685static void
693qib_pci_resume(struct pci_dev *pdev) 686qib_pci_resume(struct pci_dev *pdev)
694{ 687{
@@ -707,7 +700,6 @@ qib_pci_resume(struct pci_dev *pdev)
707const struct pci_error_handlers qib_pci_err_handler = { 700const struct pci_error_handlers qib_pci_err_handler = {
708 .error_detected = qib_pci_error_detected, 701 .error_detected = qib_pci_error_detected,
709 .mmio_enabled = qib_pci_mmio_enabled, 702 .mmio_enabled = qib_pci_mmio_enabled,
710 .link_reset = qib_pci_link_reset,
711 .slot_reset = qib_pci_slot_reset, 703 .slot_reset = qib_pci_slot_reset,
712 .resume = qib_pci_resume, 704 .resume = qib_pci_resume,
713}; 705};
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 979634000597..d5c911c09e2b 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) 308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
309{ 309{
310 free_irq(pci_dev->irq, (void *)cobalt); 310 free_irq(pci_dev->irq, (void *)cobalt);
311 311 pci_free_irq_vectors(pci_dev);
312 if (cobalt->msi_enabled)
313 pci_disable_msi(pci_dev);
314} 312}
315 313
316static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, 314static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
387 from being generated. */ 385 from being generated. */
388 cobalt_set_interrupt(cobalt, false); 386 cobalt_set_interrupt(cobalt, false);
389 387
390 if (pci_enable_msi_range(pci_dev, 1, 1) < 1) { 388 if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
391 cobalt_err("Could not enable MSI\n"); 389 cobalt_err("Could not enable MSI\n");
392 cobalt->msi_enabled = false;
393 ret = -EIO; 390 ret = -EIO;
394 goto err_release; 391 goto err_release;
395 } 392 }
396 msi_config_show(cobalt, pci_dev); 393 msi_config_show(cobalt, pci_dev);
397 cobalt->msi_enabled = true;
398 394
399 /* Register IRQ */ 395 /* Register IRQ */
400 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, 396 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9d9399..00f773ec359a 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
287 u32 irq_none; 287 u32 irq_none;
288 u32 irq_full_fifo; 288 u32 irq_full_fifo;
289 289
290 bool msi_enabled;
291
292 /* omnitek dma */ 290 /* omnitek dma */
293 int dma_channels; 291 int dma_channels;
294 int first_fifo_channel; 292 int first_fifo_channel;
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 423e8c889310..8438c1c8acde 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -781,12 +781,6 @@ static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
781 return PCI_ERS_RESULT_CAN_RECOVER; 781 return PCI_ERS_RESULT_CAN_RECOVER;
782} 782}
783 783
784static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
785{
786 printk(KERN_INFO DEVICE_NAME ": link reset\n");
787 return 0;
788}
789
790static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev) 784static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
791{ 785{
792 printk(KERN_INFO DEVICE_NAME ": slot reset\n"); 786 printk(KERN_INFO DEVICE_NAME ": slot reset\n");
@@ -800,7 +794,6 @@ static void ngene_resume(struct pci_dev *dev)
800 794
801static const struct pci_error_handlers ngene_errors = { 795static const struct pci_error_handlers ngene_errors = {
802 .error_detected = ngene_error_detected, 796 .error_detected = ngene_error_detected,
803 .link_reset = ngene_link_reset,
804 .slot_reset = ngene_slot_reset, 797 .slot_reset = ngene_slot_reset,
805 .resume = ngene_resume, 798 .resume = ngene_resume,
806}; 799};
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 6c1f49a85023..4fd21e86ad56 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1336,7 +1336,6 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
1336static struct pci_error_handlers genwqe_err_handler = { 1336static struct pci_error_handlers genwqe_err_handler = {
1337 .error_detected = genwqe_err_error_detected, 1337 .error_detected = genwqe_err_error_detected,
1338 .mmio_enabled = genwqe_err_result_none, 1338 .mmio_enabled = genwqe_err_result_none,
1339 .link_reset = genwqe_err_result_none,
1340 .slot_reset = genwqe_err_slot_reset, 1339 .slot_reset = genwqe_err_slot_reset,
1341 .resume = genwqe_err_resume, 1340 .resume = genwqe_err_resume,
1342}; 1341};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f65b805..e43690288c59 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -122,104 +122,40 @@
122#include "xgbe.h" 122#include "xgbe.h"
123#include "xgbe-common.h" 123#include "xgbe-common.h"
124 124
125static int xgbe_config_msi(struct xgbe_prv_data *pdata) 125static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
126{ 126{
127 unsigned int msi_count; 127 unsigned int vector_count;
128 unsigned int i, j; 128 unsigned int i, j;
129 int ret; 129 int ret;
130 130
131 msi_count = XGBE_MSIX_BASE_COUNT; 131 vector_count = XGBE_MSI_BASE_COUNT;
132 msi_count += max(pdata->rx_ring_count, 132 vector_count += max(pdata->rx_ring_count,
133 pdata->tx_ring_count); 133 pdata->tx_ring_count);
134 msi_count = roundup_pow_of_two(msi_count);
135 134
136 ret = pci_enable_msi_exact(pdata->pcidev, msi_count); 135 ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT,
136 vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
137 if (ret < 0) { 137 if (ret < 0) {
138 dev_info(pdata->dev, "MSI request for %u interrupts failed\n", 138 dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n");
139 msi_count);
140
141 ret = pci_enable_msi(pdata->pcidev);
142 if (ret < 0) {
143 dev_info(pdata->dev, "MSI enablement failed\n");
144 return ret;
145 }
146
147 msi_count = 1;
148 }
149
150 pdata->irq_count = msi_count;
151
152 pdata->dev_irq = pdata->pcidev->irq;
153
154 if (msi_count > 1) {
155 pdata->ecc_irq = pdata->pcidev->irq + 1;
156 pdata->i2c_irq = pdata->pcidev->irq + 2;
157 pdata->an_irq = pdata->pcidev->irq + 3;
158
159 for (i = XGBE_MSIX_BASE_COUNT, j = 0;
160 (i < msi_count) && (j < XGBE_MAX_DMA_CHANNELS);
161 i++, j++)
162 pdata->channel_irq[j] = pdata->pcidev->irq + i;
163 pdata->channel_irq_count = j;
164
165 pdata->per_channel_irq = 1;
166 pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
167 } else {
168 pdata->ecc_irq = pdata->pcidev->irq;
169 pdata->i2c_irq = pdata->pcidev->irq;
170 pdata->an_irq = pdata->pcidev->irq;
171 }
172
173 if (netif_msg_probe(pdata))
174 dev_dbg(pdata->dev, "MSI interrupts enabled\n");
175
176 return 0;
177}
178
179static int xgbe_config_msix(struct xgbe_prv_data *pdata)
180{
181 unsigned int msix_count;
182 unsigned int i, j;
183 int ret;
184
185 msix_count = XGBE_MSIX_BASE_COUNT;
186 msix_count += max(pdata->rx_ring_count,
187 pdata->tx_ring_count);
188
189 pdata->msix_entries = devm_kcalloc(pdata->dev, msix_count,
190 sizeof(struct msix_entry),
191 GFP_KERNEL);
192 if (!pdata->msix_entries)
193 return -ENOMEM;
194
195 for (i = 0; i < msix_count; i++)
196 pdata->msix_entries[i].entry = i;
197
198 ret = pci_enable_msix_range(pdata->pcidev, pdata->msix_entries,
199 XGBE_MSIX_MIN_COUNT, msix_count);
200 if (ret < 0) {
201 dev_info(pdata->dev, "MSI-X enablement failed\n");
202 devm_kfree(pdata->dev, pdata->msix_entries);
203 pdata->msix_entries = NULL;
204 return ret; 139 return ret;
205 } 140 }
206 141
207 pdata->irq_count = ret; 142 pdata->irq_count = ret;
208 143
209 pdata->dev_irq = pdata->msix_entries[0].vector; 144 pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
210 pdata->ecc_irq = pdata->msix_entries[1].vector; 145 pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1);
211 pdata->i2c_irq = pdata->msix_entries[2].vector; 146 pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2);
212 pdata->an_irq = pdata->msix_entries[3].vector; 147 pdata->an_irq = pci_irq_vector(pdata->pcidev, 3);
213 148
214 for (i = XGBE_MSIX_BASE_COUNT, j = 0; i < ret; i++, j++) 149 for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++)
215 pdata->channel_irq[j] = pdata->msix_entries[i].vector; 150 pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i);
216 pdata->channel_irq_count = j; 151 pdata->channel_irq_count = j;
217 152
218 pdata->per_channel_irq = 1; 153 pdata->per_channel_irq = 1;
219 pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; 154 pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
220 155
221 if (netif_msg_probe(pdata)) 156 if (netif_msg_probe(pdata))
222 dev_dbg(pdata->dev, "MSI-X interrupts enabled\n"); 157 dev_dbg(pdata->dev, "multi %s interrupts enabled\n",
158 pdata->pcidev->msix_enabled ? "MSI-X" : "MSI");
223 159
224 return 0; 160 return 0;
225} 161}
@@ -228,21 +164,28 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
228{ 164{
229 int ret; 165 int ret;
230 166
231 ret = xgbe_config_msix(pdata); 167 ret = xgbe_config_multi_msi(pdata);
232 if (!ret) 168 if (!ret)
233 goto out; 169 goto out;
234 170
235 ret = xgbe_config_msi(pdata); 171 ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
236 if (!ret) 172 PCI_IRQ_LEGACY | PCI_IRQ_MSI);
237 goto out; 173 if (ret < 0) {
174 dev_info(pdata->dev, "single IRQ enablement failed\n");
175 return ret;
176 }
238 177
239 pdata->irq_count = 1; 178 pdata->irq_count = 1;
240 pdata->irq_shared = 1; 179 pdata->channel_irq_count = 1;
180
181 pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
182 pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0);
183 pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0);
184 pdata->an_irq = pci_irq_vector(pdata->pcidev, 0);
241 185
242 pdata->dev_irq = pdata->pcidev->irq; 186 if (netif_msg_probe(pdata))
243 pdata->ecc_irq = pdata->pcidev->irq; 187 dev_dbg(pdata->dev, "single %s interrupt enabled\n",
244 pdata->i2c_irq = pdata->pcidev->irq; 188 pdata->pcidev->msi_enabled ? "MSI" : "legacy");
245 pdata->an_irq = pdata->pcidev->irq;
246 189
247out: 190out:
248 if (netif_msg_probe(pdata)) { 191 if (netif_msg_probe(pdata)) {
@@ -412,12 +355,15 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
412 /* Configure the netdev resource */ 355 /* Configure the netdev resource */
413 ret = xgbe_config_netdev(pdata); 356 ret = xgbe_config_netdev(pdata);
414 if (ret) 357 if (ret)
415 goto err_pci_enable; 358 goto err_irq_vectors;
416 359
417 netdev_notice(pdata->netdev, "net device enabled\n"); 360 netdev_notice(pdata->netdev, "net device enabled\n");
418 361
419 return 0; 362 return 0;
420 363
364err_irq_vectors:
365 pci_free_irq_vectors(pdata->pcidev);
366
421err_pci_enable: 367err_pci_enable:
422 xgbe_free_pdata(pdata); 368 xgbe_free_pdata(pdata);
423 369
@@ -433,6 +379,8 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
433 379
434 xgbe_deconfig_netdev(pdata); 380 xgbe_deconfig_netdev(pdata);
435 381
382 pci_free_irq_vectors(pdata->pcidev);
383
436 xgbe_free_pdata(pdata); 384 xgbe_free_pdata(pdata);
437} 385}
438 386
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd05bac..99f1c87df818 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -211,9 +211,9 @@
211#define XGBE_MAC_PROP_OFFSET 0x1d000 211#define XGBE_MAC_PROP_OFFSET 0x1d000
212#define XGBE_I2C_CTRL_OFFSET 0x1e000 212#define XGBE_I2C_CTRL_OFFSET 0x1e000
213 213
214/* PCI MSIx support */ 214/* PCI MSI/MSIx support */
215#define XGBE_MSIX_BASE_COUNT 4 215#define XGBE_MSI_BASE_COUNT 4
216#define XGBE_MSIX_MIN_COUNT (XGBE_MSIX_BASE_COUNT + 1) 216#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
217 217
218/* PCI clock frequencies */ 218/* PCI clock frequencies */
219#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */ 219#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
@@ -980,14 +980,12 @@ struct xgbe_prv_data {
980 unsigned int desc_ded_count; 980 unsigned int desc_ded_count;
981 unsigned int desc_sec_count; 981 unsigned int desc_sec_count;
982 982
983 struct msix_entry *msix_entries;
984 int dev_irq; 983 int dev_irq;
985 int ecc_irq; 984 int ecc_irq;
986 int i2c_irq; 985 int i2c_irq;
987 int channel_irq[XGBE_MAX_DMA_CHANNELS]; 986 int channel_irq[XGBE_MAX_DMA_CHANNELS];
988 987
989 unsigned int per_channel_irq; 988 unsigned int per_channel_irq;
990 unsigned int irq_shared;
991 unsigned int irq_count; 989 unsigned int irq_count;
992 unsigned int channel_irq_count; 990 unsigned int channel_irq_count;
993 unsigned int channel_irq_mode; 991 unsigned int channel_irq_mode;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 6555eb78d91c..df141420c902 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -132,4 +132,5 @@ config PCI_HYPERV
132 PCI devices from a PCI backend to support PCI driver domains. 132 PCI devices from a PCI backend to support PCI driver domains.
133 133
134source "drivers/pci/hotplug/Kconfig" 134source "drivers/pci/hotplug/Kconfig"
135source "drivers/pci/dwc/Kconfig"
135source "drivers/pci/host/Kconfig" 136source "drivers/pci/host/Kconfig"
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index db239547fefd..b9dd37c8c9ce 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -367,7 +367,7 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
367static int pci_vpd_wait(struct pci_dev *dev) 367static int pci_vpd_wait(struct pci_dev *dev)
368{ 368{
369 struct pci_vpd *vpd = dev->vpd; 369 struct pci_vpd *vpd = dev->vpd;
370 unsigned long timeout = jiffies + msecs_to_jiffies(50); 370 unsigned long timeout = jiffies + msecs_to_jiffies(125);
371 unsigned long max_sleep = 16; 371 unsigned long max_sleep = 16;
372 u16 status; 372 u16 status;
373 int ret; 373 int ret;
@@ -684,8 +684,9 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
684 WARN_ON(!dev->block_cfg_access); 684 WARN_ON(!dev->block_cfg_access);
685 685
686 dev->block_cfg_access = 0; 686 dev->block_cfg_access = 0;
687 wake_up_all(&pci_cfg_wait);
688 raw_spin_unlock_irqrestore(&pci_lock, flags); 687 raw_spin_unlock_irqrestore(&pci_lock, flags);
688
689 wake_up_all(&pci_cfg_wait);
689} 690}
690EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); 691EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
691 692
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
new file mode 100644
index 000000000000..dfb8a69afc28
--- /dev/null
+++ b/drivers/pci/dwc/Kconfig
@@ -0,0 +1,132 @@
1menu "DesignWare PCI Core Support"
2
3config PCIE_DW
4 bool
5
6config PCIE_DW_HOST
7 bool
8 depends on PCI
9 depends on PCI_MSI_IRQ_DOMAIN
10 select PCIE_DW
11
12config PCI_DRA7XX
13 bool "TI DRA7xx PCIe controller"
14 depends on PCI
15 depends on OF && HAS_IOMEM && TI_PIPE3
16 depends on PCI_MSI_IRQ_DOMAIN
17 select PCIE_DW_HOST
18 help
19 Enables support for the PCIe controller in the DRA7xx SoC. There
20 are two instances of PCIe controller in DRA7xx. This controller can
21 act both as EP and RC. This reuses the Designware core.
22
23config PCIE_DW_PLAT
24 bool "Platform bus based DesignWare PCIe Controller"
25 depends on PCI
26 depends on PCI_MSI_IRQ_DOMAIN
27 select PCIE_DW_HOST
28 ---help---
29 This selects the DesignWare PCIe controller support. Select this if
30 you have a PCIe controller on Platform bus.
31
32 If you have a controller with this interface, say Y or M here.
33
34 If unsure, say N.
35
36config PCI_EXYNOS
37 bool "Samsung Exynos PCIe controller"
38 depends on PCI
39 depends on SOC_EXYNOS5440
40 depends on PCI_MSI_IRQ_DOMAIN
41 select PCIEPORTBUS
42 select PCIE_DW_HOST
43
44config PCI_IMX6
45 bool "Freescale i.MX6 PCIe controller"
46 depends on PCI
47 depends on SOC_IMX6Q
48 depends on PCI_MSI_IRQ_DOMAIN
49 select PCIEPORTBUS
50 select PCIE_DW_HOST
51
52config PCIE_SPEAR13XX
53 bool "STMicroelectronics SPEAr PCIe controller"
54 depends on PCI
55 depends on ARCH_SPEAR13XX
56 depends on PCI_MSI_IRQ_DOMAIN
57 select PCIEPORTBUS
58 select PCIE_DW_HOST
59 help
60 Say Y here if you want PCIe support on SPEAr13XX SoCs.
61
62config PCI_KEYSTONE
63 bool "TI Keystone PCIe controller"
64 depends on PCI
65 depends on ARCH_KEYSTONE
66 depends on PCI_MSI_IRQ_DOMAIN
67 select PCIEPORTBUS
68 select PCIE_DW_HOST
69 help
70 Say Y here if you want to enable PCI controller support on Keystone
71 SoCs. The PCI controller on Keystone is based on Designware hardware
72 and therefore the driver re-uses the Designware core functions to
73 implement the driver.
74
75config PCI_LAYERSCAPE
76 bool "Freescale Layerscape PCIe controller"
77 depends on PCI
78 depends on OF && (ARM || ARCH_LAYERSCAPE)
79 depends on PCI_MSI_IRQ_DOMAIN
80 select MFD_SYSCON
81 select PCIE_DW_HOST
82 help
83 Say Y here if you want PCIe controller support on Layerscape SoCs.
84
85config PCI_HISI
86 depends on OF && ARM64
87 bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
88 depends on PCI
89 depends on PCI_MSI_IRQ_DOMAIN
90 select PCIEPORTBUS
91 select PCIE_DW_HOST
92 help
93 Say Y here if you want PCIe controller support on HiSilicon
94 Hip05 and Hip06 SoCs
95
96config PCIE_QCOM
97 bool "Qualcomm PCIe controller"
98 depends on PCI
99 depends on ARCH_QCOM && OF
100 depends on PCI_MSI_IRQ_DOMAIN
101 select PCIEPORTBUS
102 select PCIE_DW_HOST
103 help
104 Say Y here to enable PCIe controller support on Qualcomm SoCs. The
105 PCIe controller uses the Designware core plus Qualcomm-specific
106 hardware wrappers.
107
108config PCIE_ARMADA_8K
109 bool "Marvell Armada-8K PCIe controller"
110 depends on PCI
111 depends on ARCH_MVEBU
112 depends on PCI_MSI_IRQ_DOMAIN
113 select PCIEPORTBUS
114 select PCIE_DW_HOST
115 help
116 Say Y here if you want to enable PCIe controller support on
117 Armada-8K SoCs. The PCIe controller on Armada-8K is based on
118 Designware hardware and therefore the driver re-uses the
119 Designware core functions to implement the driver.
120
121config PCIE_ARTPEC6
122 bool "Axis ARTPEC-6 PCIe controller"
123 depends on PCI
124 depends on MACH_ARTPEC6
125 depends on PCI_MSI_IRQ_DOMAIN
126 select PCIEPORTBUS
127 select PCIE_DW_HOST
128 help
129 Say Y here to enable PCIe controller support on Axis ARTPEC-6
130 SoCs. This PCIe controller uses the DesignWare core.
131
132endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
new file mode 100644
index 000000000000..a2df13c28798
--- /dev/null
+++ b/drivers/pci/dwc/Makefile
@@ -0,0 +1,24 @@
1obj-$(CONFIG_PCIE_DW) += pcie-designware.o
2obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
3obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
4obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
5obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
6obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
7obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
8obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
9obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
10obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
11obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
12obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
13
14# The following drivers are for devices that use the generic ACPI
15# pci_root.c driver but don't support standard ECAM config access.
16# They contain MCFG quirks to replace the generic ECAM accessors with
17# device-specific ones that are shared with the DT driver.
18
19# The ACPI driver is generic and should not require driver-specific
20# config options to be enabled, so we always build these drivers on
21# ARM64 and use internal ifdefs to only build the pieces we need
22# depending on whether ACPI, the DT driver, or both are enabled.
23
24obj-$(CONFIG_ARM64) += pcie-hisi.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 9595fad63f6f..0984baff07e3 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20#include <linux/of_pci.h>
20#include <linux/pci.h> 21#include <linux/pci.h>
21#include <linux/phy/phy.h> 22#include <linux/phy/phy.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
@@ -63,14 +64,18 @@
63#define LINK_UP BIT(16) 64#define LINK_UP BIT(16)
64#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF 65#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
65 66
67#define EXP_CAP_ID_OFFSET 0x70
68
66struct dra7xx_pcie { 69struct dra7xx_pcie {
67 struct pcie_port pp; 70 struct dw_pcie *pci;
68 void __iomem *base; /* DT ti_conf */ 71 void __iomem *base; /* DT ti_conf */
69 int phy_count; /* DT phy-names count */ 72 int phy_count; /* DT phy-names count */
70 struct phy **phy; 73 struct phy **phy;
74 int link_gen;
75 struct irq_domain *irq_domain;
71}; 76};
72 77
73#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp) 78#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
74 79
75static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) 80static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
76{ 81{
@@ -83,9 +88,9 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
83 writel(value, pcie->base + offset); 88 writel(value, pcie->base + offset);
84} 89}
85 90
86static int dra7xx_pcie_link_up(struct pcie_port *pp) 91static int dra7xx_pcie_link_up(struct dw_pcie *pci)
87{ 92{
88 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); 93 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
89 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); 94 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
90 95
91 return !!(reg & LINK_UP); 96 return !!(reg & LINK_UP);
@@ -93,20 +98,41 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
93 98
94static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx) 99static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
95{ 100{
96 struct pcie_port *pp = &dra7xx->pp; 101 struct dw_pcie *pci = dra7xx->pci;
97 struct device *dev = pp->dev; 102 struct device *dev = pci->dev;
98 u32 reg; 103 u32 reg;
104 u32 exp_cap_off = EXP_CAP_ID_OFFSET;
99 105
100 if (dw_pcie_link_up(pp)) { 106 if (dw_pcie_link_up(pci)) {
101 dev_err(dev, "link is already up\n"); 107 dev_err(dev, "link is already up\n");
102 return 0; 108 return 0;
103 } 109 }
104 110
111 if (dra7xx->link_gen == 1) {
112 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
113 4, &reg);
114 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
115 reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
116 reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
117 dw_pcie_write(pci->dbi_base + exp_cap_off +
118 PCI_EXP_LNKCAP, 4, reg);
119 }
120
121 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
122 2, &reg);
123 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
124 reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
125 reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
126 dw_pcie_write(pci->dbi_base + exp_cap_off +
127 PCI_EXP_LNKCTL2, 2, reg);
128 }
129 }
130
105 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 131 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
106 reg |= LTSSM_EN; 132 reg |= LTSSM_EN;
107 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 133 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
108 134
109 return dw_pcie_wait_for_link(pp); 135 return dw_pcie_wait_for_link(pci);
110} 136}
111 137
112static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) 138static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
@@ -117,19 +143,14 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
117 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS); 143 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
118 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, 144 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
119 ~LEG_EP_INTERRUPTS & ~MSI); 145 ~LEG_EP_INTERRUPTS & ~MSI);
120 146 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
121 if (IS_ENABLED(CONFIG_PCI_MSI)) 147 MSI | LEG_EP_INTERRUPTS);
122 dra7xx_pcie_writel(dra7xx,
123 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI);
124 else
125 dra7xx_pcie_writel(dra7xx,
126 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
127 LEG_EP_INTERRUPTS);
128} 148}
129 149
130static void dra7xx_pcie_host_init(struct pcie_port *pp) 150static void dra7xx_pcie_host_init(struct pcie_port *pp)
131{ 151{
132 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); 152 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
153 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
133 154
134 pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; 155 pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
135 pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; 156 pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
@@ -139,13 +160,11 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
139 dw_pcie_setup_rc(pp); 160 dw_pcie_setup_rc(pp);
140 161
141 dra7xx_pcie_establish_link(dra7xx); 162 dra7xx_pcie_establish_link(dra7xx);
142 if (IS_ENABLED(CONFIG_PCI_MSI)) 163 dw_pcie_msi_init(pp);
143 dw_pcie_msi_init(pp);
144 dra7xx_pcie_enable_interrupts(dra7xx); 164 dra7xx_pcie_enable_interrupts(dra7xx);
145} 165}
146 166
147static struct pcie_host_ops dra7xx_pcie_host_ops = { 167static struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
148 .link_up = dra7xx_pcie_link_up,
149 .host_init = dra7xx_pcie_host_init, 168 .host_init = dra7xx_pcie_host_init,
150}; 169};
151 170
@@ -164,7 +183,9 @@ static const struct irq_domain_ops intx_domain_ops = {
164 183
165static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) 184static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
166{ 185{
167 struct device *dev = pp->dev; 186 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
187 struct device *dev = pci->dev;
188 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
168 struct device_node *node = dev->of_node; 189 struct device_node *node = dev->of_node;
169 struct device_node *pcie_intc_node = of_get_next_child(node, NULL); 190 struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
170 191
@@ -173,9 +194,9 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
173 return -ENODEV; 194 return -ENODEV;
174 } 195 }
175 196
176 pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, 197 dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
177 &intx_domain_ops, pp); 198 &intx_domain_ops, pp);
178 if (!pp->irq_domain) { 199 if (!dra7xx->irq_domain) {
179 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 200 dev_err(dev, "Failed to get a INTx IRQ domain\n");
180 return -ENODEV; 201 return -ENODEV;
181 } 202 }
@@ -186,7 +207,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
186static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) 207static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
187{ 208{
188 struct dra7xx_pcie *dra7xx = arg; 209 struct dra7xx_pcie *dra7xx = arg;
189 struct pcie_port *pp = &dra7xx->pp; 210 struct dw_pcie *pci = dra7xx->pci;
211 struct pcie_port *pp = &pci->pp;
190 u32 reg; 212 u32 reg;
191 213
192 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); 214 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@@ -199,7 +221,8 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
199 case INTB: 221 case INTB:
200 case INTC: 222 case INTC:
201 case INTD: 223 case INTD:
202 generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg))); 224 generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
225 ffs(reg)));
203 break; 226 break;
204 } 227 }
205 228
@@ -212,7 +235,8 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
212static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) 235static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
213{ 236{
214 struct dra7xx_pcie *dra7xx = arg; 237 struct dra7xx_pcie *dra7xx = arg;
215 struct device *dev = dra7xx->pp.dev; 238 struct dw_pcie *pci = dra7xx->pci;
239 struct device *dev = pci->dev;
216 u32 reg; 240 u32 reg;
217 241
218 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); 242 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
@@ -267,8 +291,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
267 struct platform_device *pdev) 291 struct platform_device *pdev)
268{ 292{
269 int ret; 293 int ret;
270 struct pcie_port *pp = &dra7xx->pp; 294 struct dw_pcie *pci = dra7xx->pci;
271 struct device *dev = pp->dev; 295 struct pcie_port *pp = &pci->pp;
296 struct device *dev = pci->dev;
272 struct resource *res; 297 struct resource *res;
273 298
274 pp->irq = platform_get_irq(pdev, 1); 299 pp->irq = platform_get_irq(pdev, 1);
@@ -285,15 +310,13 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
285 return ret; 310 return ret;
286 } 311 }
287 312
288 if (!IS_ENABLED(CONFIG_PCI_MSI)) { 313 ret = dra7xx_pcie_init_irq_domain(pp);
289 ret = dra7xx_pcie_init_irq_domain(pp); 314 if (ret < 0)
290 if (ret < 0) 315 return ret;
291 return ret;
292 }
293 316
294 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); 317 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
295 pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); 318 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
296 if (!pp->dbi_base) 319 if (!pci->dbi_base)
297 return -ENOMEM; 320 return -ENOMEM;
298 321
299 ret = dw_pcie_host_init(pp); 322 ret = dw_pcie_host_init(pp);
@@ -305,6 +328,49 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
305 return 0; 328 return 0;
306} 329}
307 330
331static const struct dw_pcie_ops dw_pcie_ops = {
332 .link_up = dra7xx_pcie_link_up,
333};
334
335static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
336{
337 int phy_count = dra7xx->phy_count;
338
339 while (phy_count--) {
340 phy_power_off(dra7xx->phy[phy_count]);
341 phy_exit(dra7xx->phy[phy_count]);
342 }
343}
344
345static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
346{
347 int phy_count = dra7xx->phy_count;
348 int ret;
349 int i;
350
351 for (i = 0; i < phy_count; i++) {
352 ret = phy_init(dra7xx->phy[i]);
353 if (ret < 0)
354 goto err_phy;
355
356 ret = phy_power_on(dra7xx->phy[i]);
357 if (ret < 0) {
358 phy_exit(dra7xx->phy[i]);
359 goto err_phy;
360 }
361 }
362
363 return 0;
364
365err_phy:
366 while (--i >= 0) {
367 phy_power_off(dra7xx->phy[i]);
368 phy_exit(dra7xx->phy[i]);
369 }
370
371 return ret;
372}
373
308static int __init dra7xx_pcie_probe(struct platform_device *pdev) 374static int __init dra7xx_pcie_probe(struct platform_device *pdev)
309{ 375{
310 u32 reg; 376 u32 reg;
@@ -315,21 +381,26 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
315 struct phy **phy; 381 struct phy **phy;
316 void __iomem *base; 382 void __iomem *base;
317 struct resource *res; 383 struct resource *res;
318 struct dra7xx_pcie *dra7xx; 384 struct dw_pcie *pci;
319 struct pcie_port *pp; 385 struct pcie_port *pp;
386 struct dra7xx_pcie *dra7xx;
320 struct device *dev = &pdev->dev; 387 struct device *dev = &pdev->dev;
321 struct device_node *np = dev->of_node; 388 struct device_node *np = dev->of_node;
322 char name[10]; 389 char name[10];
323 int gpio_sel; 390 struct gpio_desc *reset;
324 enum of_gpio_flags flags;
325 unsigned long gpio_flags;
326 391
327 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); 392 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
328 if (!dra7xx) 393 if (!dra7xx)
329 return -ENOMEM; 394 return -ENOMEM;
330 395
331 pp = &dra7xx->pp; 396 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
332 pp->dev = dev; 397 if (!pci)
398 return -ENOMEM;
399
400 pci->dev = dev;
401 pci->ops = &dw_pcie_ops;
402
403 pp = &pci->pp;
333 pp->ops = &dra7xx_pcie_host_ops; 404 pp->ops = &dra7xx_pcie_host_ops;
334 405
335 irq = platform_get_irq(pdev, 0); 406 irq = platform_get_irq(pdev, 0);
@@ -365,22 +436,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
365 phy[i] = devm_phy_get(dev, name); 436 phy[i] = devm_phy_get(dev, name);
366 if (IS_ERR(phy[i])) 437 if (IS_ERR(phy[i]))
367 return PTR_ERR(phy[i]); 438 return PTR_ERR(phy[i]);
368
369 ret = phy_init(phy[i]);
370 if (ret < 0)
371 goto err_phy;
372
373 ret = phy_power_on(phy[i]);
374 if (ret < 0) {
375 phy_exit(phy[i]);
376 goto err_phy;
377 }
378 } 439 }
379 440
380 dra7xx->base = base; 441 dra7xx->base = base;
381 dra7xx->phy = phy; 442 dra7xx->phy = phy;
443 dra7xx->pci = pci;
382 dra7xx->phy_count = phy_count; 444 dra7xx->phy_count = phy_count;
383 445
446 ret = dra7xx_pcie_enable_phy(dra7xx);
447 if (ret) {
448 dev_err(dev, "failed to enable phy\n");
449 return ret;
450 }
451
452 platform_set_drvdata(pdev, dra7xx);
453
384 pm_runtime_enable(dev); 454 pm_runtime_enable(dev);
385 ret = pm_runtime_get_sync(dev); 455 ret = pm_runtime_get_sync(dev);
386 if (ret < 0) { 456 if (ret < 0) {
@@ -388,19 +458,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
388 goto err_get_sync; 458 goto err_get_sync;
389 } 459 }
390 460
391 gpio_sel = of_get_gpio_flags(dev->of_node, 0, &flags); 461 reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
392 if (gpio_is_valid(gpio_sel)) { 462 if (IS_ERR(reset)) {
393 gpio_flags = (flags & OF_GPIO_ACTIVE_LOW) ? 463 ret = PTR_ERR(reset);
394 GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH; 464 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
395 ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
396 "pcie_reset");
397 if (ret) {
398 dev_err(dev, "gpio%d request failed, ret %d\n",
399 gpio_sel, ret);
400 goto err_gpio;
401 }
402 } else if (gpio_sel == -EPROBE_DEFER) {
403 ret = -EPROBE_DEFER;
404 goto err_gpio; 465 goto err_gpio;
405 } 466 }
406 467
@@ -408,11 +469,14 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
408 reg &= ~LTSSM_EN; 469 reg &= ~LTSSM_EN;
409 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 470 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
410 471
472 dra7xx->link_gen = of_pci_get_max_link_speed(np);
473 if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
474 dra7xx->link_gen = 2;
475
411 ret = dra7xx_add_pcie_port(dra7xx, pdev); 476 ret = dra7xx_add_pcie_port(dra7xx, pdev);
412 if (ret < 0) 477 if (ret < 0)
413 goto err_gpio; 478 goto err_gpio;
414 479
415 platform_set_drvdata(pdev, dra7xx);
416 return 0; 480 return 0;
417 481
418err_gpio: 482err_gpio:
@@ -420,12 +484,7 @@ err_gpio:
420 484
421err_get_sync: 485err_get_sync:
422 pm_runtime_disable(dev); 486 pm_runtime_disable(dev);
423 487 dra7xx_pcie_disable_phy(dra7xx);
424err_phy:
425 while (--i >= 0) {
426 phy_power_off(phy[i]);
427 phy_exit(phy[i]);
428 }
429 488
430 return ret; 489 return ret;
431} 490}
@@ -434,13 +493,13 @@ err_phy:
434static int dra7xx_pcie_suspend(struct device *dev) 493static int dra7xx_pcie_suspend(struct device *dev)
435{ 494{
436 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 495 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
437 struct pcie_port *pp = &dra7xx->pp; 496 struct dw_pcie *pci = dra7xx->pci;
438 u32 val; 497 u32 val;
439 498
440 /* clear MSE */ 499 /* clear MSE */
441 val = dw_pcie_readl_rc(pp, PCI_COMMAND); 500 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
442 val &= ~PCI_COMMAND_MEMORY; 501 val &= ~PCI_COMMAND_MEMORY;
443 dw_pcie_writel_rc(pp, PCI_COMMAND, val); 502 dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
444 503
445 return 0; 504 return 0;
446} 505}
@@ -448,13 +507,13 @@ static int dra7xx_pcie_suspend(struct device *dev)
448static int dra7xx_pcie_resume(struct device *dev) 507static int dra7xx_pcie_resume(struct device *dev)
449{ 508{
450 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 509 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
451 struct pcie_port *pp = &dra7xx->pp; 510 struct dw_pcie *pci = dra7xx->pci;
452 u32 val; 511 u32 val;
453 512
454 /* set MSE */ 513 /* set MSE */
455 val = dw_pcie_readl_rc(pp, PCI_COMMAND); 514 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
456 val |= PCI_COMMAND_MEMORY; 515 val |= PCI_COMMAND_MEMORY;
457 dw_pcie_writel_rc(pp, PCI_COMMAND, val); 516 dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
458 517
459 return 0; 518 return 0;
460} 519}
@@ -462,12 +521,8 @@ static int dra7xx_pcie_resume(struct device *dev)
462static int dra7xx_pcie_suspend_noirq(struct device *dev) 521static int dra7xx_pcie_suspend_noirq(struct device *dev)
463{ 522{
464 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 523 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
465 int count = dra7xx->phy_count;
466 524
467 while (count--) { 525 dra7xx_pcie_disable_phy(dra7xx);
468 phy_power_off(dra7xx->phy[count]);
469 phy_exit(dra7xx->phy[count]);
470 }
471 526
472 return 0; 527 return 0;
473} 528}
@@ -475,31 +530,15 @@ static int dra7xx_pcie_suspend_noirq(struct device *dev)
475static int dra7xx_pcie_resume_noirq(struct device *dev) 530static int dra7xx_pcie_resume_noirq(struct device *dev)
476{ 531{
477 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 532 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
478 int phy_count = dra7xx->phy_count;
479 int ret; 533 int ret;
480 int i;
481
482 for (i = 0; i < phy_count; i++) {
483 ret = phy_init(dra7xx->phy[i]);
484 if (ret < 0)
485 goto err_phy;
486 534
487 ret = phy_power_on(dra7xx->phy[i]); 535 ret = dra7xx_pcie_enable_phy(dra7xx);
488 if (ret < 0) { 536 if (ret) {
489 phy_exit(dra7xx->phy[i]); 537 dev_err(dev, "failed to enable phy\n");
490 goto err_phy; 538 return ret;
491 }
492 } 539 }
493 540
494 return 0; 541 return 0;
495
496err_phy:
497 while (--i >= 0) {
498 phy_power_off(dra7xx->phy[i]);
499 phy_exit(dra7xx->phy[i]);
500 }
501
502 return ret;
503} 542}
504#endif 543#endif
505 544
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
new file mode 100644
index 000000000000..001c91a945aa
--- /dev/null
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -0,0 +1,751 @@
1/*
2 * PCIe host controller driver for Samsung EXYNOS SoCs
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/gpio.h>
17#include <linux/interrupt.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/of_device.h>
21#include <linux/of_gpio.h>
22#include <linux/pci.h>
23#include <linux/platform_device.h>
24#include <linux/phy/phy.h>
25#include <linux/resource.h>
26#include <linux/signal.h>
27#include <linux/types.h>
28
29#include "pcie-designware.h"
30
31#define to_exynos_pcie(x) dev_get_drvdata((x)->dev)
32
33/* PCIe ELBI registers */
34#define PCIE_IRQ_PULSE 0x000
35#define IRQ_INTA_ASSERT BIT(0)
36#define IRQ_INTB_ASSERT BIT(2)
37#define IRQ_INTC_ASSERT BIT(4)
38#define IRQ_INTD_ASSERT BIT(6)
39#define PCIE_IRQ_LEVEL 0x004
40#define PCIE_IRQ_SPECIAL 0x008
41#define PCIE_IRQ_EN_PULSE 0x00c
42#define PCIE_IRQ_EN_LEVEL 0x010
43#define IRQ_MSI_ENABLE BIT(2)
44#define PCIE_IRQ_EN_SPECIAL 0x014
45#define PCIE_PWR_RESET 0x018
46#define PCIE_CORE_RESET 0x01c
47#define PCIE_CORE_RESET_ENABLE BIT(0)
48#define PCIE_STICKY_RESET 0x020
49#define PCIE_NONSTICKY_RESET 0x024
50#define PCIE_APP_INIT_RESET 0x028
51#define PCIE_APP_LTSSM_ENABLE 0x02c
52#define PCIE_ELBI_RDLH_LINKUP 0x064
53#define PCIE_ELBI_LTSSM_ENABLE 0x1
54#define PCIE_ELBI_SLV_AWMISC 0x11c
55#define PCIE_ELBI_SLV_ARMISC 0x120
56#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
57
58/* PCIe Purple registers */
59#define PCIE_PHY_GLOBAL_RESET 0x000
60#define PCIE_PHY_COMMON_RESET 0x004
61#define PCIE_PHY_CMN_REG 0x008
62#define PCIE_PHY_MAC_RESET 0x00c
63#define PCIE_PHY_PLL_LOCKED 0x010
64#define PCIE_PHY_TRSVREG_RESET 0x020
65#define PCIE_PHY_TRSV_RESET 0x024
66
67/* PCIe PHY registers */
68#define PCIE_PHY_IMPEDANCE 0x004
69#define PCIE_PHY_PLL_DIV_0 0x008
70#define PCIE_PHY_PLL_BIAS 0x00c
71#define PCIE_PHY_DCC_FEEDBACK 0x014
72#define PCIE_PHY_PLL_DIV_1 0x05c
73#define PCIE_PHY_COMMON_POWER 0x064
74#define PCIE_PHY_COMMON_PD_CMN BIT(3)
75#define PCIE_PHY_TRSV0_EMP_LVL 0x084
76#define PCIE_PHY_TRSV0_DRV_LVL 0x088
77#define PCIE_PHY_TRSV0_RXCDR 0x0ac
78#define PCIE_PHY_TRSV0_POWER 0x0c4
79#define PCIE_PHY_TRSV0_PD_TSV BIT(7)
80#define PCIE_PHY_TRSV0_LVCC 0x0dc
81#define PCIE_PHY_TRSV1_EMP_LVL 0x144
82#define PCIE_PHY_TRSV1_RXCDR 0x16c
83#define PCIE_PHY_TRSV1_POWER 0x184
84#define PCIE_PHY_TRSV1_PD_TSV BIT(7)
85#define PCIE_PHY_TRSV1_LVCC 0x19c
86#define PCIE_PHY_TRSV2_EMP_LVL 0x204
87#define PCIE_PHY_TRSV2_RXCDR 0x22c
88#define PCIE_PHY_TRSV2_POWER 0x244
89#define PCIE_PHY_TRSV2_PD_TSV BIT(7)
90#define PCIE_PHY_TRSV2_LVCC 0x25c
91#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
92#define PCIE_PHY_TRSV3_RXCDR 0x2ec
93#define PCIE_PHY_TRSV3_POWER 0x304
94#define PCIE_PHY_TRSV3_PD_TSV BIT(7)
95#define PCIE_PHY_TRSV3_LVCC 0x31c
96
/*
 * Legacy (non-PHY-framework) register banks, mapped from the first three
 * DT "reg" resources.  Not allocated at all when the generic PHY is used.
 */
struct exynos_pcie_mem_res {
	void __iomem *elbi_base;   /* DT 0th resource: PCIe CTRL */
	void __iomem *phy_base;    /* DT 1st resource: PHY CTRL */
	void __iomem *block_base;  /* DT 2nd resource: PHY ADDITIONAL CTRL */
};
102
/* Core and bus clocks, obtained/enabled through the per-variant ops. */
struct exynos_pcie_clk_res {
	struct clk *clk;	/* "pcie": root-complex core clock */
	struct clk *bus_clk;	/* "pcie_bus": bus interface clock */
};
107
/*
 * Per-controller state.  PHY control goes either through the legacy
 * mem_res register banks or through the generic PHY framework (phy),
 * selected at probe time by using_phy.
 */
struct exynos_pcie {
	struct dw_pcie *pci;			/* DesignWare core state */
	struct exynos_pcie_mem_res *mem_res;	/* NULL when using_phy */
	struct exynos_pcie_clk_res *clk_res;
	const struct exynos_pcie_ops *ops;	/* per-variant resource hooks */
	int reset_gpio;				/* PERST# gpio or -errno if absent */

	/* For Generic PHY Framework */
	bool using_phy;
	struct phy *phy;
};
119
/*
 * Per-SoC-variant hooks, selected via the of_device_id .data pointer.
 * All hooks are optional; callers check for NULL before invoking.
 */
struct exynos_pcie_ops {
	int (*get_mem_resources)(struct platform_device *pdev,
				 struct exynos_pcie *ep);
	int (*get_clk_resources)(struct exynos_pcie *ep);
	int (*init_clk_resources)(struct exynos_pcie *ep);
	void (*deinit_clk_resources)(struct exynos_pcie *ep);
};
127
/*
 * Map the ELBI, PHY and PHY-block register areas from the platform
 * device's first three MEM resources.
 *
 * When the generic PHY framework is in use, none of the legacy mappings
 * are created and ep->mem_res stays NULL — callers that touch mem_res
 * must therefore be PHY-mode aware.
 *
 * Returns 0 on success or a negative errno from devm_ioremap_resource().
 */
static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
					     struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct device *dev = pci->dev;
	struct resource *res;

	/* If using the PHY framework, doesn't need to get other resource */
	if (ep->using_phy)
		return 0;

	ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
	if (!ep->mem_res)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ep->mem_res->elbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ep->mem_res->elbi_base))
		return PTR_ERR(ep->mem_res->elbi_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ep->mem_res->phy_base))
		return PTR_ERR(ep->mem_res->phy_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	ep->mem_res->block_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ep->mem_res->block_base))
		return PTR_ERR(ep->mem_res->block_base);

	return 0;
}
160
161static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
162{
163 struct dw_pcie *pci = ep->pci;
164 struct device *dev = pci->dev;
165
166 ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
167 if (!ep->clk_res)
168 return -ENOMEM;
169
170 ep->clk_res->clk = devm_clk_get(dev, "pcie");
171 if (IS_ERR(ep->clk_res->clk)) {
172 dev_err(dev, "Failed to get pcie rc clock\n");
173 return PTR_ERR(ep->clk_res->clk);
174 }
175
176 ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
177 if (IS_ERR(ep->clk_res->bus_clk)) {
178 dev_err(dev, "Failed to get pcie bus clock\n");
179 return PTR_ERR(ep->clk_res->bus_clk);
180 }
181
182 return 0;
183}
184
185static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
186{
187 struct dw_pcie *pci = ep->pci;
188 struct device *dev = pci->dev;
189 int ret;
190
191 ret = clk_prepare_enable(ep->clk_res->clk);
192 if (ret) {
193 dev_err(dev, "cannot enable pcie rc clock");
194 return ret;
195 }
196
197 ret = clk_prepare_enable(ep->clk_res->bus_clk);
198 if (ret) {
199 dev_err(dev, "cannot enable pcie bus clock");
200 goto err_bus_clk;
201 }
202
203 return 0;
204
205err_bus_clk:
206 clk_disable_unprepare(ep->clk_res->clk);
207
208 return ret;
209}
210
/* Disable clocks in reverse order of init_clk_resources(). */
static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
	clk_disable_unprepare(ep->clk_res->bus_clk);
	clk_disable_unprepare(ep->clk_res->clk);
}
216
/* Resource hooks for the Exynos5440 variant (see of_match table). */
static const struct exynos_pcie_ops exynos5440_pcie_ops = {
	.get_mem_resources	= exynos5440_pcie_get_mem_resources,
	.get_clk_resources	= exynos5440_pcie_get_clk_resources,
	.init_clk_resources	= exynos5440_pcie_init_clk_resources,
	.deinit_clk_resources	= exynos5440_pcie_deinit_clk_resources,
};
223
/* MMIO write helper; note the (val, reg) argument order, unlike writel(). */
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
	writel(val, base + reg);
}
228
/* MMIO read helper for the ELBI/PHY/block register banks. */
static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}
233
234static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
235{
236 u32 val;
237
238 val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
239 if (on)
240 val |= PCIE_ELBI_SLV_DBI_ENABLE;
241 else
242 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
243 exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
244}
245
246static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
247{
248 u32 val;
249
250 val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
251 if (on)
252 val |= PCIE_ELBI_SLV_DBI_ENABLE;
253 else
254 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
255 exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
256}
257
/*
 * Put the PCIe core into reset.  The register sequence is
 * order-dependent hardware bring-down; do not reorder.
 * NOTE(review): 0 in PCIE_CORE_RESET appears to mean "in reset"
 * (deassert writes 1) — confirm against the Exynos5440 manual.
 */
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
	val &= ~PCIE_CORE_RESET_ENABLE;
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
269
/*
 * Release the core from reset, pulse the app-init reset, and release
 * the PHY MAC reset.  Order-dependent hardware sequence; do not reorder.
 * NOTE(review): touches both elbi_base and block_base — only valid in
 * legacy (non-PHY-framework) mode where mem_res is mapped.
 */
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
	val |= PCIE_CORE_RESET_ENABLE;

	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
	exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_MAC_RESET);
}
284
/* Hold the PHY in reset: assert MAC reset, then the global PHY reset. */
static void exynos_pcie_assert_phy_reset(struct exynos_pcie *ep)
{
	exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_MAC_RESET);
	exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_GLOBAL_RESET);
}
290
/*
 * Release the PHY from reset: drop global/common/transceiver resets and
 * raise the power reset in the ELBI block.  Order-dependent; do not
 * reorder the writes.
 */
static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *ep)
{
	exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_GLOBAL_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_PWR_RESET);
	exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
	exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_CMN_REG);
	exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSVREG_RESET);
	exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSV_RESET);
}
300
/*
 * Power up the PHY: clear the power-down bit in the common block and in
 * each of the four transceiver (TRSV0..3) blocks.
 */
static void exynos_pcie_power_on_phy(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
	val &= ~PCIE_PHY_COMMON_PD_CMN;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
	val &= ~PCIE_PHY_TRSV0_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
	val &= ~PCIE_PHY_TRSV1_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
	val &= ~PCIE_PHY_TRSV2_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
	val &= ~PCIE_PHY_TRSV3_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
}
325
/*
 * Power down the PHY: mirror image of exynos_pcie_power_on_phy() —
 * set the power-down bit in the common block and all four TRSV blocks.
 */
static void exynos_pcie_power_off_phy(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
	val |= PCIE_PHY_COMMON_PD_CMN;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
	val |= PCIE_PHY_TRSV0_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
	val |= PCIE_PHY_TRSV1_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
	val |= PCIE_PHY_TRSV2_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);

	val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
	val |= PCIE_PHY_TRSV3_PD_TSV;
	exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
}
350
/*
 * Program the analog PHY tuning registers.  The magic values are
 * board/SoC calibration constants carried over from the vendor code;
 * each write is annotated with its purpose below.  Do not reorder.
 */
static void exynos_pcie_init_phy(struct exynos_pcie *ep)
{
	/* DCC feedback control off */
	exynos_pcie_writel(ep->mem_res->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);

	/* set TX/RX impedance */
	exynos_pcie_writel(ep->mem_res->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);

	/* set 50Mhz PHY clock */
	exynos_pcie_writel(ep->mem_res->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);

	/* set TX Differential output for lane 0 */
	exynos_pcie_writel(ep->mem_res->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);

	/* set TX Pre-emphasis Level Control for lane 0 to minimum */
	exynos_pcie_writel(ep->mem_res->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);

	/* set RX clock and data recovery bandwidth */
	exynos_pcie_writel(ep->mem_res->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);

	/* change TX Pre-emphasis Level Control for lanes */
	exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
	exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);

	/* set LVCC */
	exynos_pcie_writel(ep->mem_res->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
	exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
	exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
	exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
388
/*
 * Drive the endpoint's PERST# line high via the optional "reset-gpio".
 * Boards without the gpio (reset_gpio < 0) skip this silently.
 * NOTE(review): the devm_gpio_request_one() return value is ignored, so
 * a busy/invalid gpio goes unreported — confirm this is intentional.
 */
static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct device *dev = pci->dev;

	if (ep->reset_gpio >= 0)
		devm_gpio_request_one(dev, ep->reset_gpio,
				      GPIOF_OUT_INIT_HIGH, "RESET");
}
398
399static int exynos_pcie_establish_link(struct exynos_pcie *ep)
400{
401 struct dw_pcie *pci = ep->pci;
402 struct pcie_port *pp = &pci->pp;
403 struct device *dev = pci->dev;
404 u32 val;
405
406 if (dw_pcie_link_up(pci)) {
407 dev_err(dev, "Link already up\n");
408 return 0;
409 }
410
411 exynos_pcie_assert_core_reset(ep);
412
413 if (ep->using_phy) {
414 phy_reset(ep->phy);
415
416 exynos_pcie_writel(ep->mem_res->elbi_base, 1,
417 PCIE_PWR_RESET);
418
419 phy_power_on(ep->phy);
420 phy_init(ep->phy);
421 } else {
422 exynos_pcie_assert_phy_reset(ep);
423 exynos_pcie_deassert_phy_reset(ep);
424 exynos_pcie_power_on_phy(ep);
425 exynos_pcie_init_phy(ep);
426
427 /* pulse for common reset */
428 exynos_pcie_writel(ep->mem_res->block_base, 1,
429 PCIE_PHY_COMMON_RESET);
430 udelay(500);
431 exynos_pcie_writel(ep->mem_res->block_base, 0,
432 PCIE_PHY_COMMON_RESET);
433 }
434
435 /* pulse for common reset */
436 exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_COMMON_RESET);
437 udelay(500);
438 exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
439
440 exynos_pcie_deassert_core_reset(ep);
441 dw_pcie_setup_rc(pp);
442 exynos_pcie_assert_reset(ep);
443
444 /* assert LTSSM enable */
445 exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
446 PCIE_APP_LTSSM_ENABLE);
447
448 /* check if the link is up or not */
449 if (!dw_pcie_wait_for_link(pci))
450 return 0;
451
452 if (ep->using_phy) {
453 phy_power_off(ep->phy);
454 return -ETIMEDOUT;
455 }
456
457 while (exynos_pcie_readl(ep->mem_res->phy_base,
458 PCIE_PHY_PLL_LOCKED) == 0) {
459 val = exynos_pcie_readl(ep->mem_res->block_base,
460 PCIE_PHY_PLL_LOCKED);
461 dev_info(dev, "PLL Locked: 0x%x\n", val);
462 }
463 exynos_pcie_power_off_phy(ep);
464 return -ETIMEDOUT;
465}
466
467static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
468{
469 u32 val;
470
471 val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
472 exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
473}
474
475static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
476{
477 u32 val;
478
479 /* enable INTX interrupt */
480 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
481 IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
482 exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
483}
484
/* INTx pulse IRQ handler: nothing to demux, just ack the pending bits. */
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
	struct exynos_pcie *ep = arg;

	exynos_pcie_clear_irq_pulse(ep);
	return IRQ_HANDLED;
}
492
493static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
494{
495 struct exynos_pcie *ep = arg;
496 struct dw_pcie *pci = ep->pci;
497 struct pcie_port *pp = &pci->pp;
498
499 return dw_handle_msi_irq(pp);
500}
501
/*
 * Program the MSI target address in the DesignWare core, then unmask
 * the controller-level MSI interrupt in the ELBI block.
 */
static void exynos_pcie_msi_init(struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val;

	dw_pcie_msi_init(pp);

	/* enable MSI interrupt */
	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
	val |= IRQ_MSI_ENABLE;
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
}
515
/* Enable INTx pulse interrupts and, when configured, MSI. */
static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
{
	exynos_pcie_enable_irq_pulse(ep);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		exynos_pcie_msi_init(ep);
}
523
/*
 * dw_pcie_ops.readl_dbi: DBI reads must be bracketed by the ELBI
 * sideband read mode so the slave interface routes them to DBI space.
 */
static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
{
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	u32 val;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	val = readl(pci->dbi_base + reg);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return val;
}
534
/*
 * dw_pcie_ops.writel_dbi: DBI writes bracketed by the ELBI sideband
 * write mode (see exynos_pcie_readl_dbi for the read counterpart).
 */
static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
	struct exynos_pcie *ep = to_exynos_pcie(pci);

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	writel(val, pci->dbi_base + reg);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
}
543
/*
 * Own-config-space read: like readl_dbi but supports 1/2/4-byte
 * accesses via dw_pcie_read(); returns a PCIBIOS_* status code.
 */
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				   u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	int ret;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	ret = dw_pcie_read(pci->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return ret;
}
556
/*
 * Own-config-space write: like writel_dbi but supports 1/2/4-byte
 * accesses via dw_pcie_write(); returns a PCIBIOS_* status code.
 */
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
				   u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	int ret;

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	ret = dw_pcie_write(pci->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
	return ret;
}
569
570static int exynos_pcie_link_up(struct dw_pcie *pci)
571{
572 struct exynos_pcie *ep = to_exynos_pcie(pci);
573 u32 val;
574
575 val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
576 if (val == PCIE_ELBI_LTSSM_ENABLE)
577 return 1;
578
579 return 0;
580}
581
/*
 * dw_pcie_host_ops.host_init callback: bring up the link and enable
 * interrupts.  NOTE(review): the return value of
 * exynos_pcie_establish_link() is discarded here — the host-init
 * callback is void, so a link-training failure is only visible in logs.
 */
static void exynos_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);

	exynos_pcie_establish_link(ep);
	exynos_pcie_enable_interrupts(ep);
}
590
/* DesignWare host callbacks: sideband-aware config access + init. */
static struct dw_pcie_host_ops exynos_pcie_host_ops = {
	.rd_own_conf = exynos_pcie_rd_own_conf,
	.wr_own_conf = exynos_pcie_wr_own_conf,
	.host_init = exynos_pcie_host_init,
};
596
597static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
598 struct platform_device *pdev)
599{
600 struct dw_pcie *pci = ep->pci;
601 struct pcie_port *pp = &pci->pp;
602 struct device *dev = &pdev->dev;
603 int ret;
604
605 pp->irq = platform_get_irq(pdev, 1);
606 if (!pp->irq) {
607 dev_err(dev, "failed to get irq\n");
608 return -ENODEV;
609 }
610 ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
611 IRQF_SHARED, "exynos-pcie", ep);
612 if (ret) {
613 dev_err(dev, "failed to request irq\n");
614 return ret;
615 }
616
617 if (IS_ENABLED(CONFIG_PCI_MSI)) {
618 pp->msi_irq = platform_get_irq(pdev, 0);
619 if (!pp->msi_irq) {
620 dev_err(dev, "failed to get msi irq\n");
621 return -ENODEV;
622 }
623
624 ret = devm_request_irq(dev, pp->msi_irq,
625 exynos_pcie_msi_irq_handler,
626 IRQF_SHARED | IRQF_NO_THREAD,
627 "exynos-pcie", ep);
628 if (ret) {
629 dev_err(dev, "failed to request msi irq\n");
630 return ret;
631 }
632 }
633
634 pp->root_bus_nr = -1;
635 pp->ops = &exynos_pcie_host_ops;
636
637 ret = dw_pcie_host_init(pp);
638 if (ret) {
639 dev_err(dev, "failed to initialize host\n");
640 return ret;
641 }
642
643 return 0;
644}
645
/* Core accessors handed to the DesignWare layer (sideband DBI access). */
static const struct dw_pcie_ops dw_pcie_ops = {
	.readl_dbi = exynos_pcie_readl_dbi,
	.writel_dbi = exynos_pcie_writel_dbi,
	.link_up = exynos_pcie_link_up,
};
651
652static int __init exynos_pcie_probe(struct platform_device *pdev)
653{
654 struct device *dev = &pdev->dev;
655 struct dw_pcie *pci;
656 struct exynos_pcie *ep;
657 struct device_node *np = dev->of_node;
658 int ret;
659
660 ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
661 if (!ep)
662 return -ENOMEM;
663
664 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
665 if (!pci)
666 return -ENOMEM;
667
668 pci->dev = dev;
669 pci->ops = &dw_pcie_ops;
670
671 ep->ops = (const struct exynos_pcie_ops *)
672 of_device_get_match_data(dev);
673
674 ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
675
676 /* Assume that controller doesn't use the PHY framework */
677 ep->using_phy = false;
678
679 ep->phy = devm_of_phy_get(dev, np, NULL);
680 if (IS_ERR(ep->phy)) {
681 if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
682 return PTR_ERR(ep->phy);
683 dev_warn(dev, "Use the 'phy' property. Current DT of pci-exynos was deprecated!!\n");
684 } else
685 ep->using_phy = true;
686
687 if (ep->ops && ep->ops->get_mem_resources) {
688 ret = ep->ops->get_mem_resources(pdev, ep);
689 if (ret)
690 return ret;
691 }
692
693 if (ep->ops && ep->ops->get_clk_resources) {
694 ret = ep->ops->get_clk_resources(ep);
695 if (ret)
696 return ret;
697 ret = ep->ops->init_clk_resources(ep);
698 if (ret)
699 return ret;
700 }
701
702 platform_set_drvdata(pdev, ep);
703
704 ret = exynos_add_pcie_port(ep, pdev);
705 if (ret < 0)
706 goto fail_probe;
707
708 return 0;
709
710fail_probe:
711 if (ep->using_phy)
712 phy_exit(ep->phy);
713
714 if (ep->ops && ep->ops->deinit_clk_resources)
715 ep->ops->deinit_clk_resources(ep);
716 return ret;
717}
718
/*
 * Remove: only the clocks need explicit teardown — all other resources
 * are devm-managed.  NOTE(review): the PHY is not exited here, unlike
 * the probe failure path; confirm whether that is intentional.
 */
static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
	struct exynos_pcie *ep = platform_get_drvdata(pdev);

	if (ep->ops && ep->ops->deinit_clk_resources)
		ep->ops->deinit_clk_resources(ep);

	return 0;
}
728
/* Match table: .data selects the per-variant exynos_pcie_ops. */
static const struct of_device_id exynos_pcie_of_match[] = {
	{
		.compatible = "samsung,exynos5440-pcie",
		.data = &exynos5440_pcie_ops
	},
	{},
};

/* No .probe here: bound once via platform_driver_probe() below. */
static struct platform_driver exynos_pcie_driver = {
	.remove		= __exit_p(exynos_pcie_remove),
	.driver = {
		.name	= "exynos-pcie",
		.of_match_table = exynos_pcie_of_match,
	},
};

/* Exynos PCIe driver does not allow module unload */

static int __init exynos_pcie_init(void)
{
	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
}
subsys_initcall(exynos_pcie_init);
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index c8cefb078218..3ab6761db9e8 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -30,7 +30,7 @@
30 30
31#include "pcie-designware.h" 31#include "pcie-designware.h"
32 32
33#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) 33#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
34 34
35enum imx6_pcie_variants { 35enum imx6_pcie_variants {
36 IMX6Q, 36 IMX6Q,
@@ -39,7 +39,7 @@ enum imx6_pcie_variants {
39}; 39};
40 40
41struct imx6_pcie { 41struct imx6_pcie {
42 struct pcie_port pp; /* pp.dbi_base is DT 0th resource */ 42 struct dw_pcie *pci;
43 int reset_gpio; 43 int reset_gpio;
44 bool gpio_active_high; 44 bool gpio_active_high;
45 struct clk *pcie_bus; 45 struct clk *pcie_bus;
@@ -97,13 +97,13 @@ struct imx6_pcie {
97 97
98static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) 98static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
99{ 99{
100 struct pcie_port *pp = &imx6_pcie->pp; 100 struct dw_pcie *pci = imx6_pcie->pci;
101 u32 val; 101 u32 val;
102 u32 max_iterations = 10; 102 u32 max_iterations = 10;
103 u32 wait_counter = 0; 103 u32 wait_counter = 0;
104 104
105 do { 105 do {
106 val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT); 106 val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
107 val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; 107 val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
108 wait_counter++; 108 wait_counter++;
109 109
@@ -118,22 +118,22 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
118 118
119static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) 119static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
120{ 120{
121 struct pcie_port *pp = &imx6_pcie->pp; 121 struct dw_pcie *pci = imx6_pcie->pci;
122 u32 val; 122 u32 val;
123 int ret; 123 int ret;
124 124
125 val = addr << PCIE_PHY_CTRL_DATA_LOC; 125 val = addr << PCIE_PHY_CTRL_DATA_LOC;
126 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val); 126 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
127 127
128 val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); 128 val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
129 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val); 129 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
130 130
131 ret = pcie_phy_poll_ack(imx6_pcie, 1); 131 ret = pcie_phy_poll_ack(imx6_pcie, 1);
132 if (ret) 132 if (ret)
133 return ret; 133 return ret;
134 134
135 val = addr << PCIE_PHY_CTRL_DATA_LOC; 135 val = addr << PCIE_PHY_CTRL_DATA_LOC;
136 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val); 136 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
137 137
138 return pcie_phy_poll_ack(imx6_pcie, 0); 138 return pcie_phy_poll_ack(imx6_pcie, 0);
139} 139}
@@ -141,7 +141,7 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
141/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ 141/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
142static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) 142static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
143{ 143{
144 struct pcie_port *pp = &imx6_pcie->pp; 144 struct dw_pcie *pci = imx6_pcie->pci;
145 u32 val, phy_ctl; 145 u32 val, phy_ctl;
146 int ret; 146 int ret;
147 147
@@ -151,24 +151,24 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
151 151
152 /* assert Read signal */ 152 /* assert Read signal */
153 phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; 153 phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
154 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl); 154 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
155 155
156 ret = pcie_phy_poll_ack(imx6_pcie, 1); 156 ret = pcie_phy_poll_ack(imx6_pcie, 1);
157 if (ret) 157 if (ret)
158 return ret; 158 return ret;
159 159
160 val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT); 160 val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
161 *data = val & 0xffff; 161 *data = val & 0xffff;
162 162
163 /* deassert Read signal */ 163 /* deassert Read signal */
164 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00); 164 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
165 165
166 return pcie_phy_poll_ack(imx6_pcie, 0); 166 return pcie_phy_poll_ack(imx6_pcie, 0);
167} 167}
168 168
169static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) 169static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
170{ 170{
171 struct pcie_port *pp = &imx6_pcie->pp; 171 struct dw_pcie *pci = imx6_pcie->pci;
172 u32 var; 172 u32 var;
173 int ret; 173 int ret;
174 174
@@ -179,11 +179,11 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
179 return ret; 179 return ret;
180 180
181 var = data << PCIE_PHY_CTRL_DATA_LOC; 181 var = data << PCIE_PHY_CTRL_DATA_LOC;
182 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); 182 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
183 183
184 /* capture data */ 184 /* capture data */
185 var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); 185 var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
186 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); 186 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
187 187
188 ret = pcie_phy_poll_ack(imx6_pcie, 1); 188 ret = pcie_phy_poll_ack(imx6_pcie, 1);
189 if (ret) 189 if (ret)
@@ -191,7 +191,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
191 191
192 /* deassert cap data */ 192 /* deassert cap data */
193 var = data << PCIE_PHY_CTRL_DATA_LOC; 193 var = data << PCIE_PHY_CTRL_DATA_LOC;
194 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); 194 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
195 195
196 /* wait for ack de-assertion */ 196 /* wait for ack de-assertion */
197 ret = pcie_phy_poll_ack(imx6_pcie, 0); 197 ret = pcie_phy_poll_ack(imx6_pcie, 0);
@@ -200,7 +200,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
200 200
201 /* assert wr signal */ 201 /* assert wr signal */
202 var = 0x1 << PCIE_PHY_CTRL_WR_LOC; 202 var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
203 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); 203 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
204 204
205 /* wait for ack */ 205 /* wait for ack */
206 ret = pcie_phy_poll_ack(imx6_pcie, 1); 206 ret = pcie_phy_poll_ack(imx6_pcie, 1);
@@ -209,14 +209,14 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
209 209
210 /* deassert wr signal */ 210 /* deassert wr signal */
211 var = data << PCIE_PHY_CTRL_DATA_LOC; 211 var = data << PCIE_PHY_CTRL_DATA_LOC;
212 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var); 212 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
213 213
214 /* wait for ack de-assertion */ 214 /* wait for ack de-assertion */
215 ret = pcie_phy_poll_ack(imx6_pcie, 0); 215 ret = pcie_phy_poll_ack(imx6_pcie, 0);
216 if (ret) 216 if (ret)
217 return ret; 217 return ret;
218 218
219 dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0); 219 dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
220 220
221 return 0; 221 return 0;
222} 222}
@@ -247,9 +247,6 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
247 247
248static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) 248static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
249{ 249{
250 struct pcie_port *pp = &imx6_pcie->pp;
251 u32 val, gpr1, gpr12;
252
253 switch (imx6_pcie->variant) { 250 switch (imx6_pcie->variant) {
254 case IMX6SX: 251 case IMX6SX:
255 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, 252 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
@@ -266,33 +263,6 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
266 IMX6Q_GPR1_PCIE_SW_RST); 263 IMX6Q_GPR1_PCIE_SW_RST);
267 break; 264 break;
268 case IMX6Q: 265 case IMX6Q:
269 /*
270 * If the bootloader already enabled the link we need some
271 * special handling to get the core back into a state where
272 * it is safe to touch it for configuration. As there is
273 * no dedicated reset signal wired up for MX6QDL, we need
274 * to manually force LTSSM into "detect" state before
275 * completely disabling LTSSM, which is a prerequisite for
276 * core configuration.
277 *
278 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
279 * have a strong indication that the bootloader activated
280 * the link.
281 */
282 regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
283 regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
284
285 if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
286 (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
287 val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
288 val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
289 val |= PCIE_PL_PFLR_FORCE_LINK;
290 dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);
291
292 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
293 IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
294 }
295
296 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 266 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
297 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); 267 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
298 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 268 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -303,8 +273,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
303 273
304static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) 274static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
305{ 275{
306 struct pcie_port *pp = &imx6_pcie->pp; 276 struct dw_pcie *pci = imx6_pcie->pci;
307 struct device *dev = pp->dev; 277 struct device *dev = pci->dev;
308 int ret = 0; 278 int ret = 0;
309 279
310 switch (imx6_pcie->variant) { 280 switch (imx6_pcie->variant) {
@@ -340,8 +310,8 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
340 310
341static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) 311static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
342{ 312{
343 struct pcie_port *pp = &imx6_pcie->pp; 313 struct dw_pcie *pci = imx6_pcie->pci;
344 struct device *dev = pp->dev; 314 struct device *dev = pci->dev;
345 int ret; 315 int ret;
346 316
347 ret = clk_prepare_enable(imx6_pcie->pcie_phy); 317 ret = clk_prepare_enable(imx6_pcie->pcie_phy);
@@ -440,28 +410,28 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
440 410
441static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) 411static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
442{ 412{
443 struct pcie_port *pp = &imx6_pcie->pp; 413 struct dw_pcie *pci = imx6_pcie->pci;
444 struct device *dev = pp->dev; 414 struct device *dev = pci->dev;
445 415
446 /* check if the link is up or not */ 416 /* check if the link is up or not */
447 if (!dw_pcie_wait_for_link(pp)) 417 if (!dw_pcie_wait_for_link(pci))
448 return 0; 418 return 0;
449 419
450 dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", 420 dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
451 dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0), 421 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
452 dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1)); 422 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
453 return -ETIMEDOUT; 423 return -ETIMEDOUT;
454} 424}
455 425
456static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) 426static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
457{ 427{
458 struct pcie_port *pp = &imx6_pcie->pp; 428 struct dw_pcie *pci = imx6_pcie->pci;
459 struct device *dev = pp->dev; 429 struct device *dev = pci->dev;
460 u32 tmp; 430 u32 tmp;
461 unsigned int retries; 431 unsigned int retries;
462 432
463 for (retries = 0; retries < 200; retries++) { 433 for (retries = 0; retries < 200; retries++) {
464 tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL); 434 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
465 /* Test if the speed change finished. */ 435 /* Test if the speed change finished. */
466 if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) 436 if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
467 return 0; 437 return 0;
@@ -475,15 +445,16 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
475static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) 445static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
476{ 446{
477 struct imx6_pcie *imx6_pcie = arg; 447 struct imx6_pcie *imx6_pcie = arg;
478 struct pcie_port *pp = &imx6_pcie->pp; 448 struct dw_pcie *pci = imx6_pcie->pci;
449 struct pcie_port *pp = &pci->pp;
479 450
480 return dw_handle_msi_irq(pp); 451 return dw_handle_msi_irq(pp);
481} 452}
482 453
483static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) 454static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
484{ 455{
485 struct pcie_port *pp = &imx6_pcie->pp; 456 struct dw_pcie *pci = imx6_pcie->pci;
486 struct device *dev = pp->dev; 457 struct device *dev = pci->dev;
487 u32 tmp; 458 u32 tmp;
488 int ret; 459 int ret;
489 460
@@ -492,27 +463,25 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
492 * started in Gen2 mode, there is a possibility the devices on the 463 * started in Gen2 mode, there is a possibility the devices on the
493 * bus will not be detected at all. This happens with PCIe switches. 464 * bus will not be detected at all. This happens with PCIe switches.
494 */ 465 */
495 tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR); 466 tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
496 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; 467 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
497 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; 468 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
498 dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp); 469 dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
499 470
500 /* Start LTSSM. */ 471 /* Start LTSSM. */
501 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, 472 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
502 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); 473 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
503 474
504 ret = imx6_pcie_wait_for_link(imx6_pcie); 475 ret = imx6_pcie_wait_for_link(imx6_pcie);
505 if (ret) { 476 if (ret)
506 dev_info(dev, "Link never came up\n");
507 goto err_reset_phy; 477 goto err_reset_phy;
508 }
509 478
510 if (imx6_pcie->link_gen == 2) { 479 if (imx6_pcie->link_gen == 2) {
511 /* Allow Gen2 mode after the link is up. */ 480 /* Allow Gen2 mode after the link is up. */
512 tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR); 481 tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
513 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; 482 tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
514 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; 483 tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
515 dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp); 484 dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
516 } else { 485 } else {
517 dev_info(dev, "Link: Gen2 disabled\n"); 486 dev_info(dev, "Link: Gen2 disabled\n");
518 } 487 }
@@ -521,9 +490,9 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
521 * Start Directed Speed Change so the best possible speed both link 490 * Start Directed Speed Change so the best possible speed both link
522 * partners support can be negotiated. 491 * partners support can be negotiated.
523 */ 492 */
524 tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL); 493 tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
525 tmp |= PORT_LOGIC_SPEED_CHANGE; 494 tmp |= PORT_LOGIC_SPEED_CHANGE;
526 dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); 495 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
527 496
528 ret = imx6_pcie_wait_for_speed_change(imx6_pcie); 497 ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
529 if (ret) { 498 if (ret) {
@@ -538,21 +507,22 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
538 goto err_reset_phy; 507 goto err_reset_phy;
539 } 508 }
540 509
541 tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR); 510 tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
542 dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); 511 dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
543 return 0; 512 return 0;
544 513
545err_reset_phy: 514err_reset_phy:
546 dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", 515 dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
547 dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0), 516 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
548 dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1)); 517 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
549 imx6_pcie_reset_phy(imx6_pcie); 518 imx6_pcie_reset_phy(imx6_pcie);
550 return ret; 519 return ret;
551} 520}
552 521
553static void imx6_pcie_host_init(struct pcie_port *pp) 522static void imx6_pcie_host_init(struct pcie_port *pp)
554{ 523{
555 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); 524 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
525 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
556 526
557 imx6_pcie_assert_core_reset(imx6_pcie); 527 imx6_pcie_assert_core_reset(imx6_pcie);
558 imx6_pcie_init_phy(imx6_pcie); 528 imx6_pcie_init_phy(imx6_pcie);
@@ -564,22 +534,22 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
564 dw_pcie_msi_init(pp); 534 dw_pcie_msi_init(pp);
565} 535}
566 536
567static int imx6_pcie_link_up(struct pcie_port *pp) 537static int imx6_pcie_link_up(struct dw_pcie *pci)
568{ 538{
569 return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) & 539 return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
570 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; 540 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
571} 541}
572 542
573static struct pcie_host_ops imx6_pcie_host_ops = { 543static struct dw_pcie_host_ops imx6_pcie_host_ops = {
574 .link_up = imx6_pcie_link_up,
575 .host_init = imx6_pcie_host_init, 544 .host_init = imx6_pcie_host_init,
576}; 545};
577 546
578static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, 547static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
579 struct platform_device *pdev) 548 struct platform_device *pdev)
580{ 549{
581 struct pcie_port *pp = &imx6_pcie->pp; 550 struct dw_pcie *pci = imx6_pcie->pci;
582 struct device *dev = pp->dev; 551 struct pcie_port *pp = &pci->pp;
552 struct device *dev = &pdev->dev;
583 int ret; 553 int ret;
584 554
585 if (IS_ENABLED(CONFIG_PCI_MSI)) { 555 if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -611,11 +581,15 @@ static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
611 return 0; 581 return 0;
612} 582}
613 583
584static const struct dw_pcie_ops dw_pcie_ops = {
585 .link_up = imx6_pcie_link_up,
586};
587
614static int __init imx6_pcie_probe(struct platform_device *pdev) 588static int __init imx6_pcie_probe(struct platform_device *pdev)
615{ 589{
616 struct device *dev = &pdev->dev; 590 struct device *dev = &pdev->dev;
591 struct dw_pcie *pci;
617 struct imx6_pcie *imx6_pcie; 592 struct imx6_pcie *imx6_pcie;
618 struct pcie_port *pp;
619 struct resource *dbi_base; 593 struct resource *dbi_base;
620 struct device_node *node = dev->of_node; 594 struct device_node *node = dev->of_node;
621 int ret; 595 int ret;
@@ -624,8 +598,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
624 if (!imx6_pcie) 598 if (!imx6_pcie)
625 return -ENOMEM; 599 return -ENOMEM;
626 600
627 pp = &imx6_pcie->pp; 601 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
628 pp->dev = dev; 602 if (!pci)
603 return -ENOMEM;
604
605 pci->dev = dev;
606 pci->ops = &dw_pcie_ops;
629 607
630 imx6_pcie->variant = 608 imx6_pcie->variant =
631 (enum imx6_pcie_variants)of_device_get_match_data(dev); 609 (enum imx6_pcie_variants)of_device_get_match_data(dev);
@@ -635,9 +613,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
635 "imprecise external abort"); 613 "imprecise external abort");
636 614
637 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 615 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
638 pp->dbi_base = devm_ioremap_resource(dev, dbi_base); 616 pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
639 if (IS_ERR(pp->dbi_base)) 617 if (IS_ERR(pci->dbi_base))
640 return PTR_ERR(pp->dbi_base); 618 return PTR_ERR(pci->dbi_base);
641 619
642 /* Fetch GPIOs */ 620 /* Fetch GPIOs */
643 imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); 621 imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
@@ -678,8 +656,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
678 imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, 656 imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
679 "pcie_inbound_axi"); 657 "pcie_inbound_axi");
680 if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { 658 if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
681 dev_err(dev, 659 dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
682 "pcie_incbound_axi clock missing or invalid\n");
683 return PTR_ERR(imx6_pcie->pcie_inbound_axi); 660 return PTR_ERR(imx6_pcie->pcie_inbound_axi);
684 } 661 }
685 } 662 }
@@ -719,11 +696,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
719 if (ret) 696 if (ret)
720 imx6_pcie->link_gen = 1; 697 imx6_pcie->link_gen = 1;
721 698
699 platform_set_drvdata(pdev, imx6_pcie);
700
722 ret = imx6_add_pcie_port(imx6_pcie, pdev); 701 ret = imx6_add_pcie_port(imx6_pcie, pdev);
723 if (ret < 0) 702 if (ret < 0)
724 return ret; 703 return ret;
725 704
726 platform_set_drvdata(pdev, imx6_pcie);
727 return 0; 705 return 0;
728} 706}
729 707
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 9397c4667106..6b396f6b4615 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -72,7 +72,7 @@
72/* Config space registers */ 72/* Config space registers */
73#define DEBUG0 0x728 73#define DEBUG0 0x728
74 74
75#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) 75#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
76 76
77static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, 77static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
78 u32 *bit_pos) 78 u32 *bit_pos)
@@ -83,7 +83,8 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
83 83
84phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) 84phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
85{ 85{
86 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 86 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
87 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
87 88
88 return ks_pcie->app.start + MSI_IRQ; 89 return ks_pcie->app.start + MSI_IRQ;
89} 90}
@@ -100,8 +101,9 @@ static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
100 101
101void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) 102void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
102{ 103{
103 struct pcie_port *pp = &ks_pcie->pp; 104 struct dw_pcie *pci = ks_pcie->pci;
104 struct device *dev = pp->dev; 105 struct pcie_port *pp = &pci->pp;
106 struct device *dev = pci->dev;
105 u32 pending, vector; 107 u32 pending, vector;
106 int src, virq; 108 int src, virq;
107 109
@@ -128,10 +130,12 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
128 struct keystone_pcie *ks_pcie; 130 struct keystone_pcie *ks_pcie;
129 struct msi_desc *msi; 131 struct msi_desc *msi;
130 struct pcie_port *pp; 132 struct pcie_port *pp;
133 struct dw_pcie *pci;
131 134
132 msi = irq_data_get_msi_desc(d); 135 msi = irq_data_get_msi_desc(d);
133 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); 136 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
134 ks_pcie = to_keystone_pcie(pp); 137 pci = to_dw_pcie_from_pp(pp);
138 ks_pcie = to_keystone_pcie(pci);
135 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 139 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
136 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos); 140 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
137 141
@@ -143,7 +147,8 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
143void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) 147void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
144{ 148{
145 u32 reg_offset, bit_pos; 149 u32 reg_offset, bit_pos;
146 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 150 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
151 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
147 152
148 update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); 153 update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
149 ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), 154 ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
@@ -153,7 +158,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
153void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) 158void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
154{ 159{
155 u32 reg_offset, bit_pos; 160 u32 reg_offset, bit_pos;
156 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 161 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
162 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
157 163
158 update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos); 164 update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
159 ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), 165 ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
@@ -165,11 +171,13 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
165 struct keystone_pcie *ks_pcie; 171 struct keystone_pcie *ks_pcie;
166 struct msi_desc *msi; 172 struct msi_desc *msi;
167 struct pcie_port *pp; 173 struct pcie_port *pp;
174 struct dw_pcie *pci;
168 u32 offset; 175 u32 offset;
169 176
170 msi = irq_data_get_msi_desc(d); 177 msi = irq_data_get_msi_desc(d);
171 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); 178 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
172 ks_pcie = to_keystone_pcie(pp); 179 pci = to_dw_pcie_from_pp(pp);
180 ks_pcie = to_keystone_pcie(pci);
173 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 181 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
174 182
175 /* Mask the end point if PVM implemented */ 183 /* Mask the end point if PVM implemented */
@@ -186,11 +194,13 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
186 struct keystone_pcie *ks_pcie; 194 struct keystone_pcie *ks_pcie;
187 struct msi_desc *msi; 195 struct msi_desc *msi;
188 struct pcie_port *pp; 196 struct pcie_port *pp;
197 struct dw_pcie *pci;
189 u32 offset; 198 u32 offset;
190 199
191 msi = irq_data_get_msi_desc(d); 200 msi = irq_data_get_msi_desc(d);
192 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); 201 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
193 ks_pcie = to_keystone_pcie(pp); 202 pci = to_dw_pcie_from_pp(pp);
203 ks_pcie = to_keystone_pcie(pci);
194 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 204 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
195 205
196 /* Mask the end point if PVM implemented */ 206 /* Mask the end point if PVM implemented */
@@ -225,8 +235,9 @@ static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
225 235
226int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) 236int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
227{ 237{
228 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 238 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
229 struct device *dev = pp->dev; 239 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
240 struct device *dev = pci->dev;
230 int i; 241 int i;
231 242
232 pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np, 243 pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
@@ -254,8 +265,8 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
254 265
255void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) 266void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
256{ 267{
257 struct pcie_port *pp = &ks_pcie->pp; 268 struct dw_pcie *pci = ks_pcie->pci;
258 struct device *dev = pp->dev; 269 struct device *dev = pci->dev;
259 u32 pending; 270 u32 pending;
260 int virq; 271 int virq;
261 272
@@ -285,7 +296,7 @@ irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
285 return IRQ_NONE; 296 return IRQ_NONE;
286 297
287 if (status & ERR_FATAL_IRQ) 298 if (status & ERR_FATAL_IRQ)
288 dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n", 299 dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
289 status); 300 status);
290 301
291 /* Ack the IRQ; status bits are RW1C */ 302 /* Ack the IRQ; status bits are RW1C */
@@ -366,15 +377,16 @@ static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
366 377
367void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) 378void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
368{ 379{
369 struct pcie_port *pp = &ks_pcie->pp; 380 struct dw_pcie *pci = ks_pcie->pci;
381 struct pcie_port *pp = &pci->pp;
370 u32 start = pp->mem->start, end = pp->mem->end; 382 u32 start = pp->mem->start, end = pp->mem->end;
371 int i, tr_size; 383 int i, tr_size;
372 u32 val; 384 u32 val;
373 385
374 /* Disable BARs for inbound access */ 386 /* Disable BARs for inbound access */
375 ks_dw_pcie_set_dbi_mode(ks_pcie); 387 ks_dw_pcie_set_dbi_mode(ks_pcie);
376 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0); 388 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
377 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0); 389 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
378 ks_dw_pcie_clear_dbi_mode(ks_pcie); 390 ks_dw_pcie_clear_dbi_mode(ks_pcie);
379 391
380 /* Set outbound translation size per window division */ 392 /* Set outbound translation size per window division */
@@ -415,11 +427,12 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
415 unsigned int devfn) 427 unsigned int devfn)
416{ 428{
417 u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); 429 u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
418 struct pcie_port *pp = &ks_pcie->pp; 430 struct dw_pcie *pci = ks_pcie->pci;
431 struct pcie_port *pp = &pci->pp;
419 u32 regval; 432 u32 regval;
420 433
421 if (bus == 0) 434 if (bus == 0)
422 return pp->dbi_base; 435 return pci->dbi_base;
423 436
424 regval = (bus << 16) | (device << 8) | function; 437 regval = (bus << 16) | (device << 8) | function;
425 438
@@ -438,25 +451,27 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
438int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 451int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
439 unsigned int devfn, int where, int size, u32 *val) 452 unsigned int devfn, int where, int size, u32 *val)
440{ 453{
441 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 454 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
455 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
442 u8 bus_num = bus->number; 456 u8 bus_num = bus->number;
443 void __iomem *addr; 457 void __iomem *addr;
444 458
445 addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); 459 addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
446 460
447 return dw_pcie_cfg_read(addr + where, size, val); 461 return dw_pcie_read(addr + where, size, val);
448} 462}
449 463
450int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, 464int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
451 unsigned int devfn, int where, int size, u32 val) 465 unsigned int devfn, int where, int size, u32 val)
452{ 466{
453 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 467 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
468 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
454 u8 bus_num = bus->number; 469 u8 bus_num = bus->number;
455 void __iomem *addr; 470 void __iomem *addr;
456 471
457 addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); 472 addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
458 473
459 return dw_pcie_cfg_write(addr + where, size, val); 474 return dw_pcie_write(addr + where, size, val);
460} 475}
461 476
462/** 477/**
@@ -466,14 +481,15 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
466 */ 481 */
467void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) 482void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
468{ 483{
469 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 484 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
485 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
470 486
471 /* Configure and set up BAR0 */ 487 /* Configure and set up BAR0 */
472 ks_dw_pcie_set_dbi_mode(ks_pcie); 488 ks_dw_pcie_set_dbi_mode(ks_pcie);
473 489
474 /* Enable BAR0 */ 490 /* Enable BAR0 */
475 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1); 491 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
476 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1); 492 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
477 493
478 ks_dw_pcie_clear_dbi_mode(ks_pcie); 494 ks_dw_pcie_clear_dbi_mode(ks_pcie);
479 495
@@ -481,17 +497,17 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
481 * For BAR0, just setting bus address for inbound writes (MSI) should 497 * For BAR0, just setting bus address for inbound writes (MSI) should
482 * be sufficient. Use physical address to avoid any conflicts. 498 * be sufficient. Use physical address to avoid any conflicts.
483 */ 499 */
484 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start); 500 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
485} 501}
486 502
487/** 503/**
488 * ks_dw_pcie_link_up() - Check if link up 504 * ks_dw_pcie_link_up() - Check if link up
489 */ 505 */
490int ks_dw_pcie_link_up(struct pcie_port *pp) 506int ks_dw_pcie_link_up(struct dw_pcie *pci)
491{ 507{
492 u32 val; 508 u32 val;
493 509
494 val = dw_pcie_readl_rc(pp, DEBUG0); 510 val = dw_pcie_readl_dbi(pci, DEBUG0);
495 return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; 511 return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
496} 512}
497 513
@@ -519,22 +535,23 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
519int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, 535int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
520 struct device_node *msi_intc_np) 536 struct device_node *msi_intc_np)
521{ 537{
522 struct pcie_port *pp = &ks_pcie->pp; 538 struct dw_pcie *pci = ks_pcie->pci;
523 struct device *dev = pp->dev; 539 struct pcie_port *pp = &pci->pp;
540 struct device *dev = pci->dev;
524 struct platform_device *pdev = to_platform_device(dev); 541 struct platform_device *pdev = to_platform_device(dev);
525 struct resource *res; 542 struct resource *res;
526 543
527 /* Index 0 is the config reg. space address */ 544 /* Index 0 is the config reg. space address */
528 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 545 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
529 pp->dbi_base = devm_ioremap_resource(dev, res); 546 pci->dbi_base = devm_ioremap_resource(dev, res);
530 if (IS_ERR(pp->dbi_base)) 547 if (IS_ERR(pci->dbi_base))
531 return PTR_ERR(pp->dbi_base); 548 return PTR_ERR(pci->dbi_base);
532 549
533 /* 550 /*
534 * We set these same and is used in pcie rd/wr_other_conf 551 * We set these same and is used in pcie rd/wr_other_conf
535 * functions 552 * functions
536 */ 553 */
537 pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET; 554 pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
538 pp->va_cfg1_base = pp->va_cfg0_base; 555 pp->va_cfg1_base = pp->va_cfg0_base;
539 556
540 /* Index 1 is the application reg. space address */ 557 /* Index 1 is the application reg. space address */
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index 043c19a05da1..8dc66409182d 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -44,7 +44,7 @@
44#define PCIE_RC_K2E 0xb009 44#define PCIE_RC_K2E 0xb009
45#define PCIE_RC_K2L 0xb00a 45#define PCIE_RC_K2L 0xb00a
46 46
47#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) 47#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
48 48
49static void quirk_limit_mrrs(struct pci_dev *dev) 49static void quirk_limit_mrrs(struct pci_dev *dev)
50{ 50{
@@ -88,13 +88,14 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
88 88
89static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) 89static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
90{ 90{
91 struct pcie_port *pp = &ks_pcie->pp; 91 struct dw_pcie *pci = ks_pcie->pci;
92 struct device *dev = pp->dev; 92 struct pcie_port *pp = &pci->pp;
93 struct device *dev = pci->dev;
93 unsigned int retries; 94 unsigned int retries;
94 95
95 dw_pcie_setup_rc(pp); 96 dw_pcie_setup_rc(pp);
96 97
97 if (dw_pcie_link_up(pp)) { 98 if (dw_pcie_link_up(pci)) {
98 dev_err(dev, "Link already up\n"); 99 dev_err(dev, "Link already up\n");
99 return 0; 100 return 0;
100 } 101 }
@@ -102,7 +103,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
102 /* check if the link is up or not */ 103 /* check if the link is up or not */
103 for (retries = 0; retries < 5; retries++) { 104 for (retries = 0; retries < 5; retries++) {
104 ks_dw_pcie_initiate_link_train(ks_pcie); 105 ks_dw_pcie_initiate_link_train(ks_pcie);
105 if (!dw_pcie_wait_for_link(pp)) 106 if (!dw_pcie_wait_for_link(pci))
106 return 0; 107 return 0;
107 } 108 }
108 109
@@ -115,8 +116,8 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
115 unsigned int irq = irq_desc_get_irq(desc); 116 unsigned int irq = irq_desc_get_irq(desc);
116 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); 117 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
117 u32 offset = irq - ks_pcie->msi_host_irqs[0]; 118 u32 offset = irq - ks_pcie->msi_host_irqs[0];
118 struct pcie_port *pp = &ks_pcie->pp; 119 struct dw_pcie *pci = ks_pcie->pci;
119 struct device *dev = pp->dev; 120 struct device *dev = pci->dev;
120 struct irq_chip *chip = irq_desc_get_chip(desc); 121 struct irq_chip *chip = irq_desc_get_chip(desc);
121 122
122 dev_dbg(dev, "%s, irq %d\n", __func__, irq); 123 dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -143,8 +144,8 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
143{ 144{
144 unsigned int irq = irq_desc_get_irq(desc); 145 unsigned int irq = irq_desc_get_irq(desc);
145 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); 146 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
146 struct pcie_port *pp = &ks_pcie->pp; 147 struct dw_pcie *pci = ks_pcie->pci;
147 struct device *dev = pp->dev; 148 struct device *dev = pci->dev;
148 u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; 149 u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
149 struct irq_chip *chip = irq_desc_get_chip(desc); 150 struct irq_chip *chip = irq_desc_get_chip(desc);
150 151
@@ -164,7 +165,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
164 char *controller, int *num_irqs) 165 char *controller, int *num_irqs)
165{ 166{
166 int temp, max_host_irqs, legacy = 1, *host_irqs; 167 int temp, max_host_irqs, legacy = 1, *host_irqs;
167 struct device *dev = ks_pcie->pp.dev; 168 struct device *dev = ks_pcie->pci->dev;
168 struct device_node *np_pcie = dev->of_node, **np_temp; 169 struct device_node *np_pcie = dev->of_node, **np_temp;
169 170
170 if (!strcmp(controller, "msi-interrupt-controller")) 171 if (!strcmp(controller, "msi-interrupt-controller"))
@@ -262,24 +263,25 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
262 263
263static void __init ks_pcie_host_init(struct pcie_port *pp) 264static void __init ks_pcie_host_init(struct pcie_port *pp)
264{ 265{
265 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); 266 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
267 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
266 u32 val; 268 u32 val;
267 269
268 ks_pcie_establish_link(ks_pcie); 270 ks_pcie_establish_link(ks_pcie);
269 ks_dw_pcie_setup_rc_app_regs(ks_pcie); 271 ks_dw_pcie_setup_rc_app_regs(ks_pcie);
270 ks_pcie_setup_interrupts(ks_pcie); 272 ks_pcie_setup_interrupts(ks_pcie);
271 writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), 273 writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
272 pp->dbi_base + PCI_IO_BASE); 274 pci->dbi_base + PCI_IO_BASE);
273 275
274 /* update the Vendor ID */ 276 /* update the Vendor ID */
275 writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID); 277 writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);
276 278
277 /* update the DEV_STAT_CTRL to publish right mrrs */ 279 /* update the DEV_STAT_CTRL to publish right mrrs */
278 val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); 280 val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
279 val &= ~PCI_EXP_DEVCTL_READRQ; 281 val &= ~PCI_EXP_DEVCTL_READRQ;
280 /* set the mrrs to 256 bytes */ 282 /* set the mrrs to 256 bytes */
281 val |= BIT(12); 283 val |= BIT(12);
282 writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); 284 writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
283 285
284 /* 286 /*
285 * PCIe access errors that result into OCP errors are caught by ARM as 287 * PCIe access errors that result into OCP errors are caught by ARM as
@@ -289,10 +291,9 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
289 "Asynchronous external abort"); 291 "Asynchronous external abort");
290} 292}
291 293
292static struct pcie_host_ops keystone_pcie_host_ops = { 294static struct dw_pcie_host_ops keystone_pcie_host_ops = {
293 .rd_other_conf = ks_dw_pcie_rd_other_conf, 295 .rd_other_conf = ks_dw_pcie_rd_other_conf,
294 .wr_other_conf = ks_dw_pcie_wr_other_conf, 296 .wr_other_conf = ks_dw_pcie_wr_other_conf,
295 .link_up = ks_dw_pcie_link_up,
296 .host_init = ks_pcie_host_init, 297 .host_init = ks_pcie_host_init,
297 .msi_set_irq = ks_dw_pcie_msi_set_irq, 298 .msi_set_irq = ks_dw_pcie_msi_set_irq,
298 .msi_clear_irq = ks_dw_pcie_msi_clear_irq, 299 .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
@@ -311,8 +312,9 @@ static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
311static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, 312static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
312 struct platform_device *pdev) 313 struct platform_device *pdev)
313{ 314{
314 struct pcie_port *pp = &ks_pcie->pp; 315 struct dw_pcie *pci = ks_pcie->pci;
315 struct device *dev = pp->dev; 316 struct pcie_port *pp = &pci->pp;
317 struct device *dev = &pdev->dev;
316 int ret; 318 int ret;
317 319
318 ret = ks_pcie_get_irq_controller_info(ks_pcie, 320 ret = ks_pcie_get_irq_controller_info(ks_pcie,
@@ -365,6 +367,10 @@ static const struct of_device_id ks_pcie_of_match[] = {
365 { }, 367 { },
366}; 368};
367 369
370static const struct dw_pcie_ops dw_pcie_ops = {
371 .link_up = ks_dw_pcie_link_up,
372};
373
368static int __exit ks_pcie_remove(struct platform_device *pdev) 374static int __exit ks_pcie_remove(struct platform_device *pdev)
369{ 375{
370 struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); 376 struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
@@ -377,8 +383,8 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
377static int __init ks_pcie_probe(struct platform_device *pdev) 383static int __init ks_pcie_probe(struct platform_device *pdev)
378{ 384{
379 struct device *dev = &pdev->dev; 385 struct device *dev = &pdev->dev;
386 struct dw_pcie *pci;
380 struct keystone_pcie *ks_pcie; 387 struct keystone_pcie *ks_pcie;
381 struct pcie_port *pp;
382 struct resource *res; 388 struct resource *res;
383 void __iomem *reg_p; 389 void __iomem *reg_p;
384 struct phy *phy; 390 struct phy *phy;
@@ -388,8 +394,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
388 if (!ks_pcie) 394 if (!ks_pcie)
389 return -ENOMEM; 395 return -ENOMEM;
390 396
391 pp = &ks_pcie->pp; 397 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
392 pp->dev = dev; 398 if (!pci)
399 return -ENOMEM;
400
401 pci->dev = dev;
402 pci->ops = &dw_pcie_ops;
393 403
394 /* initialize SerDes Phy if present */ 404 /* initialize SerDes Phy if present */
395 phy = devm_phy_get(dev, "pcie-phy"); 405 phy = devm_phy_get(dev, "pcie-phy");
@@ -422,6 +432,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
422 if (ret) 432 if (ret)
423 return ret; 433 return ret;
424 434
435 platform_set_drvdata(pdev, ks_pcie);
436
425 ret = ks_add_pcie_port(ks_pcie, pdev); 437 ret = ks_add_pcie_port(ks_pcie, pdev);
426 if (ret < 0) 438 if (ret < 0)
427 goto fail_clk; 439 goto fail_clk;
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/dwc/pci-keystone.h
index bc54bafda068..74c5825882df 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/dwc/pci-keystone.h
@@ -17,7 +17,7 @@
17#define MAX_LEGACY_HOST_IRQS 4 17#define MAX_LEGACY_HOST_IRQS 4
18 18
19struct keystone_pcie { 19struct keystone_pcie {
20 struct pcie_port pp; /* pp.dbi_base is DT 0th res */ 20 struct dw_pcie *pci;
21 struct clk *clk; 21 struct clk *clk;
22 /* PCI Device ID */ 22 /* PCI Device ID */
23 u32 device_id; 23 u32 device_id;
@@ -54,10 +54,10 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
54int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 54int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
55 unsigned int devfn, int where, int size, u32 *val); 55 unsigned int devfn, int where, int size, u32 *val);
56void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); 56void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
57int ks_dw_pcie_link_up(struct pcie_port *pp);
58void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); 57void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
59void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); 58void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
60void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); 59void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
61void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); 60void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
62int ks_dw_pcie_msi_host_init(struct pcie_port *pp, 61int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
63 struct msi_controller *chip); 62 struct msi_controller *chip);
63int ks_dw_pcie_link_up(struct dw_pcie *pci);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index c1f06f8f05fd..175c09e3a932 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -39,24 +39,26 @@ struct ls_pcie_drvdata {
39 u32 lut_offset; 39 u32 lut_offset;
40 u32 ltssm_shift; 40 u32 ltssm_shift;
41 u32 lut_dbg; 41 u32 lut_dbg;
42 struct pcie_host_ops *ops; 42 struct dw_pcie_host_ops *ops;
43 const struct dw_pcie_ops *dw_pcie_ops;
43}; 44};
44 45
45struct ls_pcie { 46struct ls_pcie {
46 struct pcie_port pp; /* pp.dbi_base is DT regs */ 47 struct dw_pcie *pci;
47 void __iomem *lut; 48 void __iomem *lut;
48 struct regmap *scfg; 49 struct regmap *scfg;
49 const struct ls_pcie_drvdata *drvdata; 50 const struct ls_pcie_drvdata *drvdata;
50 int index; 51 int index;
51}; 52};
52 53
53#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) 54#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
54 55
55static bool ls_pcie_is_bridge(struct ls_pcie *pcie) 56static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
56{ 57{
58 struct dw_pcie *pci = pcie->pci;
57 u32 header_type; 59 u32 header_type;
58 60
59 header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE); 61 header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
60 header_type &= 0x7f; 62 header_type &= 0x7f;
61 63
62 return header_type == PCI_HEADER_TYPE_BRIDGE; 64 return header_type == PCI_HEADER_TYPE_BRIDGE;
@@ -65,29 +67,34 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
65/* Clear multi-function bit */ 67/* Clear multi-function bit */
66static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) 68static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
67{ 69{
68 iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE); 70 struct dw_pcie *pci = pcie->pci;
71
72 iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
69} 73}
70 74
71/* Fix class value */ 75/* Fix class value */
72static void ls_pcie_fix_class(struct ls_pcie *pcie) 76static void ls_pcie_fix_class(struct ls_pcie *pcie)
73{ 77{
74 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE); 78 struct dw_pcie *pci = pcie->pci;
79
80 iowrite16(PCI_CLASS_BRIDGE_PCI, pci->dbi_base + PCI_CLASS_DEVICE);
75} 81}
76 82
77/* Drop MSG TLP except for Vendor MSG */ 83/* Drop MSG TLP except for Vendor MSG */
78static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) 84static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
79{ 85{
80 u32 val; 86 u32 val;
87 struct dw_pcie *pci = pcie->pci;
81 88
82 val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1); 89 val = ioread32(pci->dbi_base + PCIE_STRFMR1);
83 val &= 0xDFFFFFFF; 90 val &= 0xDFFFFFFF;
84 iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1); 91 iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
85} 92}
86 93
87static int ls1021_pcie_link_up(struct pcie_port *pp) 94static int ls1021_pcie_link_up(struct dw_pcie *pci)
88{ 95{
89 u32 state; 96 u32 state;
90 struct ls_pcie *pcie = to_ls_pcie(pp); 97 struct ls_pcie *pcie = to_ls_pcie(pci);
91 98
92 if (!pcie->scfg) 99 if (!pcie->scfg)
93 return 0; 100 return 0;
@@ -103,8 +110,9 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
103 110
104static void ls1021_pcie_host_init(struct pcie_port *pp) 111static void ls1021_pcie_host_init(struct pcie_port *pp)
105{ 112{
106 struct device *dev = pp->dev; 113 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
107 struct ls_pcie *pcie = to_ls_pcie(pp); 114 struct ls_pcie *pcie = to_ls_pcie(pci);
115 struct device *dev = pci->dev;
108 u32 index[2]; 116 u32 index[2];
109 117
110 pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, 118 pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
@@ -127,9 +135,9 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
127 ls_pcie_drop_msg_tlp(pcie); 135 ls_pcie_drop_msg_tlp(pcie);
128} 136}
129 137
130static int ls_pcie_link_up(struct pcie_port *pp) 138static int ls_pcie_link_up(struct dw_pcie *pci)
131{ 139{
132 struct ls_pcie *pcie = to_ls_pcie(pp); 140 struct ls_pcie *pcie = to_ls_pcie(pci);
133 u32 state; 141 u32 state;
134 142
135 state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> 143 state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
@@ -144,19 +152,21 @@ static int ls_pcie_link_up(struct pcie_port *pp)
144 152
145static void ls_pcie_host_init(struct pcie_port *pp) 153static void ls_pcie_host_init(struct pcie_port *pp)
146{ 154{
147 struct ls_pcie *pcie = to_ls_pcie(pp); 155 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
156 struct ls_pcie *pcie = to_ls_pcie(pci);
148 157
149 iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN); 158 iowrite32(1, pci->dbi_base + PCIE_DBI_RO_WR_EN);
150 ls_pcie_fix_class(pcie); 159 ls_pcie_fix_class(pcie);
151 ls_pcie_clear_multifunction(pcie); 160 ls_pcie_clear_multifunction(pcie);
152 ls_pcie_drop_msg_tlp(pcie); 161 ls_pcie_drop_msg_tlp(pcie);
153 iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN); 162 iowrite32(0, pci->dbi_base + PCIE_DBI_RO_WR_EN);
154} 163}
155 164
156static int ls_pcie_msi_host_init(struct pcie_port *pp, 165static int ls_pcie_msi_host_init(struct pcie_port *pp,
157 struct msi_controller *chip) 166 struct msi_controller *chip)
158{ 167{
159 struct device *dev = pp->dev; 168 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
169 struct device *dev = pci->dev;
160 struct device_node *np = dev->of_node; 170 struct device_node *np = dev->of_node;
161 struct device_node *msi_node; 171 struct device_node *msi_node;
162 172
@@ -175,20 +185,27 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
175 return 0; 185 return 0;
176} 186}
177 187
178static struct pcie_host_ops ls1021_pcie_host_ops = { 188static struct dw_pcie_host_ops ls1021_pcie_host_ops = {
179 .link_up = ls1021_pcie_link_up,
180 .host_init = ls1021_pcie_host_init, 189 .host_init = ls1021_pcie_host_init,
181 .msi_host_init = ls_pcie_msi_host_init, 190 .msi_host_init = ls_pcie_msi_host_init,
182}; 191};
183 192
184static struct pcie_host_ops ls_pcie_host_ops = { 193static struct dw_pcie_host_ops ls_pcie_host_ops = {
185 .link_up = ls_pcie_link_up,
186 .host_init = ls_pcie_host_init, 194 .host_init = ls_pcie_host_init,
187 .msi_host_init = ls_pcie_msi_host_init, 195 .msi_host_init = ls_pcie_msi_host_init,
188}; 196};
189 197
198static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
199 .link_up = ls1021_pcie_link_up,
200};
201
202static const struct dw_pcie_ops dw_ls_pcie_ops = {
203 .link_up = ls_pcie_link_up,
204};
205
190static struct ls_pcie_drvdata ls1021_drvdata = { 206static struct ls_pcie_drvdata ls1021_drvdata = {
191 .ops = &ls1021_pcie_host_ops, 207 .ops = &ls1021_pcie_host_ops,
208 .dw_pcie_ops = &dw_ls1021_pcie_ops,
192}; 209};
193 210
194static struct ls_pcie_drvdata ls1043_drvdata = { 211static struct ls_pcie_drvdata ls1043_drvdata = {
@@ -196,6 +213,7 @@ static struct ls_pcie_drvdata ls1043_drvdata = {
196 .ltssm_shift = 24, 213 .ltssm_shift = 24,
197 .lut_dbg = 0x7fc, 214 .lut_dbg = 0x7fc,
198 .ops = &ls_pcie_host_ops, 215 .ops = &ls_pcie_host_ops,
216 .dw_pcie_ops = &dw_ls_pcie_ops,
199}; 217};
200 218
201static struct ls_pcie_drvdata ls1046_drvdata = { 219static struct ls_pcie_drvdata ls1046_drvdata = {
@@ -203,6 +221,7 @@ static struct ls_pcie_drvdata ls1046_drvdata = {
203 .ltssm_shift = 24, 221 .ltssm_shift = 24,
204 .lut_dbg = 0x407fc, 222 .lut_dbg = 0x407fc,
205 .ops = &ls_pcie_host_ops, 223 .ops = &ls_pcie_host_ops,
224 .dw_pcie_ops = &dw_ls_pcie_ops,
206}; 225};
207 226
208static struct ls_pcie_drvdata ls2080_drvdata = { 227static struct ls_pcie_drvdata ls2080_drvdata = {
@@ -210,6 +229,7 @@ static struct ls_pcie_drvdata ls2080_drvdata = {
210 .ltssm_shift = 0, 229 .ltssm_shift = 0,
211 .lut_dbg = 0x7fc, 230 .lut_dbg = 0x7fc,
212 .ops = &ls_pcie_host_ops, 231 .ops = &ls_pcie_host_ops,
232 .dw_pcie_ops = &dw_ls_pcie_ops,
213}; 233};
214 234
215static const struct of_device_id ls_pcie_of_match[] = { 235static const struct of_device_id ls_pcie_of_match[] = {
@@ -223,10 +243,13 @@ static const struct of_device_id ls_pcie_of_match[] = {
223 243
224static int __init ls_add_pcie_port(struct ls_pcie *pcie) 244static int __init ls_add_pcie_port(struct ls_pcie *pcie)
225{ 245{
226 struct pcie_port *pp = &pcie->pp; 246 struct dw_pcie *pci = pcie->pci;
227 struct device *dev = pp->dev; 247 struct pcie_port *pp = &pci->pp;
248 struct device *dev = pci->dev;
228 int ret; 249 int ret;
229 250
251 pp->ops = pcie->drvdata->ops;
252
230 ret = dw_pcie_host_init(pp); 253 ret = dw_pcie_host_init(pp);
231 if (ret) { 254 if (ret) {
232 dev_err(dev, "failed to initialize host\n"); 255 dev_err(dev, "failed to initialize host\n");
@@ -239,8 +262,8 @@ static int __init ls_add_pcie_port(struct ls_pcie *pcie)
239static int __init ls_pcie_probe(struct platform_device *pdev) 262static int __init ls_pcie_probe(struct platform_device *pdev)
240{ 263{
241 struct device *dev = &pdev->dev; 264 struct device *dev = &pdev->dev;
265 struct dw_pcie *pci;
242 struct ls_pcie *pcie; 266 struct ls_pcie *pcie;
243 struct pcie_port *pp;
244 struct resource *dbi_base; 267 struct resource *dbi_base;
245 int ret; 268 int ret;
246 269
@@ -248,21 +271,27 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
248 if (!pcie) 271 if (!pcie)
249 return -ENOMEM; 272 return -ENOMEM;
250 273
251 pp = &pcie->pp; 274 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
252 pp->dev = dev; 275 if (!pci)
276 return -ENOMEM;
277
253 pcie->drvdata = of_device_get_match_data(dev); 278 pcie->drvdata = of_device_get_match_data(dev);
254 pp->ops = pcie->drvdata->ops; 279
280 pci->dev = dev;
281 pci->ops = pcie->drvdata->dw_pcie_ops;
255 282
256 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 283 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
257 pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base); 284 pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
258 if (IS_ERR(pcie->pp.dbi_base)) 285 if (IS_ERR(pci->dbi_base))
259 return PTR_ERR(pcie->pp.dbi_base); 286 return PTR_ERR(pci->dbi_base);
260 287
261 pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; 288 pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
262 289
263 if (!ls_pcie_is_bridge(pcie)) 290 if (!ls_pcie_is_bridge(pcie))
264 return -ENODEV; 291 return -ENODEV;
265 292
293 platform_set_drvdata(pdev, pcie);
294
266 ret = ls_add_pcie_port(pcie); 295 ret = ls_add_pcie_port(pcie);
267 if (ret < 0) 296 if (ret < 0)
268 return ret; 297 return ret;
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index 0ac0f18690f2..66bac6fbfa9f 100644
--- a/drivers/pci/host/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -29,7 +29,7 @@
29#include "pcie-designware.h" 29#include "pcie-designware.h"
30 30
31struct armada8k_pcie { 31struct armada8k_pcie {
32 struct pcie_port pp; /* pp.dbi_base is DT ctrl */ 32 struct dw_pcie *pci;
33 struct clk *clk; 33 struct clk *clk;
34}; 34};
35 35
@@ -67,76 +67,77 @@ struct armada8k_pcie {
67#define AX_USER_DOMAIN_MASK 0x3 67#define AX_USER_DOMAIN_MASK 0x3
68#define AX_USER_DOMAIN_SHIFT 4 68#define AX_USER_DOMAIN_SHIFT 4
69 69
70#define to_armada8k_pcie(x) container_of(x, struct armada8k_pcie, pp) 70#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev)
71 71
72static int armada8k_pcie_link_up(struct pcie_port *pp) 72static int armada8k_pcie_link_up(struct dw_pcie *pci)
73{ 73{
74 u32 reg; 74 u32 reg;
75 u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; 75 u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
76 76
77 reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG); 77 reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
78 78
79 if ((reg & mask) == mask) 79 if ((reg & mask) == mask)
80 return 1; 80 return 1;
81 81
82 dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg); 82 dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
83 return 0; 83 return 0;
84} 84}
85 85
86static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie) 86static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
87{ 87{
88 struct pcie_port *pp = &pcie->pp; 88 struct dw_pcie *pci = pcie->pci;
89 u32 reg; 89 u32 reg;
90 90
91 if (!dw_pcie_link_up(pp)) { 91 if (!dw_pcie_link_up(pci)) {
92 /* Disable LTSSM state machine to enable configuration */ 92 /* Disable LTSSM state machine to enable configuration */
93 reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG); 93 reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
94 reg &= ~(PCIE_APP_LTSSM_EN); 94 reg &= ~(PCIE_APP_LTSSM_EN);
95 dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg); 95 dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
96 } 96 }
97 97
98 /* Set the device to root complex mode */ 98 /* Set the device to root complex mode */
99 reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG); 99 reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
100 reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); 100 reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
101 reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; 101 reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
102 dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg); 102 dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
103 103
104 /* Set the PCIe master AxCache attributes */ 104 /* Set the PCIe master AxCache attributes */
105 dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); 105 dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
106 dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); 106 dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
107 107
108 /* Set the PCIe master AxDomain attributes */ 108 /* Set the PCIe master AxDomain attributes */
109 reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG); 109 reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
110 reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); 110 reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
111 reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; 111 reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
112 dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg); 112 dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);
113 113
114 reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG); 114 reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
115 reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); 115 reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
116 reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; 116 reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
117 dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg); 117 dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);
118 118
119 /* Enable INT A-D interrupts */ 119 /* Enable INT A-D interrupts */
120 reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG); 120 reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
121 reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | 121 reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
122 PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; 122 PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
123 dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg); 123 dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
124 124
125 if (!dw_pcie_link_up(pp)) { 125 if (!dw_pcie_link_up(pci)) {
126 /* Configuration done. Start LTSSM */ 126 /* Configuration done. Start LTSSM */
127 reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG); 127 reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
128 reg |= PCIE_APP_LTSSM_EN; 128 reg |= PCIE_APP_LTSSM_EN;
129 dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg); 129 dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
130 } 130 }
131 131
132 /* Wait until the link becomes active again */ 132 /* Wait until the link becomes active again */
133 if (dw_pcie_wait_for_link(pp)) 133 if (dw_pcie_wait_for_link(pci))
134 dev_err(pp->dev, "Link not up after reconfiguration\n"); 134 dev_err(pci->dev, "Link not up after reconfiguration\n");
135} 135}
136 136
137static void armada8k_pcie_host_init(struct pcie_port *pp) 137static void armada8k_pcie_host_init(struct pcie_port *pp)
138{ 138{
139 struct armada8k_pcie *pcie = to_armada8k_pcie(pp); 139 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
140 struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
140 141
141 dw_pcie_setup_rc(pp); 142 dw_pcie_setup_rc(pp);
142 armada8k_pcie_establish_link(pcie); 143 armada8k_pcie_establish_link(pcie);
@@ -145,7 +146,7 @@ static void armada8k_pcie_host_init(struct pcie_port *pp)
145static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) 146static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
146{ 147{
147 struct armada8k_pcie *pcie = arg; 148 struct armada8k_pcie *pcie = arg;
148 struct pcie_port *pp = &pcie->pp; 149 struct dw_pcie *pci = pcie->pci;
149 u32 val; 150 u32 val;
150 151
151 /* 152 /*
@@ -153,21 +154,21 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
153 * PCI device. However, they are also latched into the PCIe 154 * PCI device. However, they are also latched into the PCIe
154 * controller, so we simply discard them. 155 * controller, so we simply discard them.
155 */ 156 */
156 val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG); 157 val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
157 dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val); 158 dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);
158 159
159 return IRQ_HANDLED; 160 return IRQ_HANDLED;
160} 161}
161 162
162static struct pcie_host_ops armada8k_pcie_host_ops = { 163static struct dw_pcie_host_ops armada8k_pcie_host_ops = {
163 .link_up = armada8k_pcie_link_up,
164 .host_init = armada8k_pcie_host_init, 164 .host_init = armada8k_pcie_host_init,
165}; 165};
166 166
167static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, 167static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
168 struct platform_device *pdev) 168 struct platform_device *pdev)
169{ 169{
170 struct pcie_port *pp = &pcie->pp; 170 struct dw_pcie *pci = pcie->pci;
171 struct pcie_port *pp = &pci->pp;
171 struct device *dev = &pdev->dev; 172 struct device *dev = &pdev->dev;
172 int ret; 173 int ret;
173 174
@@ -196,10 +197,14 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
196 return 0; 197 return 0;
197} 198}
198 199
200static const struct dw_pcie_ops dw_pcie_ops = {
201 .link_up = armada8k_pcie_link_up,
202};
203
199static int armada8k_pcie_probe(struct platform_device *pdev) 204static int armada8k_pcie_probe(struct platform_device *pdev)
200{ 205{
206 struct dw_pcie *pci;
201 struct armada8k_pcie *pcie; 207 struct armada8k_pcie *pcie;
202 struct pcie_port *pp;
203 struct device *dev = &pdev->dev; 208 struct device *dev = &pdev->dev;
204 struct resource *base; 209 struct resource *base;
205 int ret; 210 int ret;
@@ -208,24 +213,30 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
208 if (!pcie) 213 if (!pcie)
209 return -ENOMEM; 214 return -ENOMEM;
210 215
216 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
217 if (!pci)
218 return -ENOMEM;
219
220 pci->dev = dev;
221 pci->ops = &dw_pcie_ops;
222
211 pcie->clk = devm_clk_get(dev, NULL); 223 pcie->clk = devm_clk_get(dev, NULL);
212 if (IS_ERR(pcie->clk)) 224 if (IS_ERR(pcie->clk))
213 return PTR_ERR(pcie->clk); 225 return PTR_ERR(pcie->clk);
214 226
215 clk_prepare_enable(pcie->clk); 227 clk_prepare_enable(pcie->clk);
216 228
217 pp = &pcie->pp;
218 pp->dev = dev;
219
220 /* Get the dw-pcie unit configuration/control registers base. */ 229 /* Get the dw-pcie unit configuration/control registers base. */
221 base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); 230 base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
222 pp->dbi_base = devm_ioremap_resource(dev, base); 231 pci->dbi_base = devm_ioremap_resource(dev, base);
223 if (IS_ERR(pp->dbi_base)) { 232 if (IS_ERR(pci->dbi_base)) {
224 dev_err(dev, "couldn't remap regs base %p\n", base); 233 dev_err(dev, "couldn't remap regs base %p\n", base);
225 ret = PTR_ERR(pp->dbi_base); 234 ret = PTR_ERR(pci->dbi_base);
226 goto fail; 235 goto fail;
227 } 236 }
228 237
238 platform_set_drvdata(pdev, pcie);
239
229 ret = armada8k_add_pcie_port(pcie, pdev); 240 ret = armada8k_add_pcie_port(pcie, pdev);
230 if (ret) 241 if (ret)
231 goto fail; 242 goto fail;
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 212786b27f1a..59ecc9e66436 100644
--- a/drivers/pci/host/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -24,10 +24,10 @@
24 24
25#include "pcie-designware.h" 25#include "pcie-designware.h"
26 26
27#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp) 27#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev)
28 28
29struct artpec6_pcie { 29struct artpec6_pcie {
30 struct pcie_port pp; /* pp.dbi_base is DT dbi */ 30 struct dw_pcie *pci;
31 struct regmap *regmap; /* DT axis,syscon-pcie */ 31 struct regmap *regmap; /* DT axis,syscon-pcie */
32 void __iomem *phy_base; /* DT phy */ 32 void __iomem *phy_base; /* DT phy */
33}; 33};
@@ -80,7 +80,8 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
80 80
81static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) 81static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
82{ 82{
83 struct pcie_port *pp = &artpec6_pcie->pp; 83 struct dw_pcie *pci = artpec6_pcie->pci;
84 struct pcie_port *pp = &pci->pp;
84 u32 val; 85 u32 val;
85 unsigned int retries; 86 unsigned int retries;
86 87
@@ -139,7 +140,7 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
139 * Enable writing to config regs. This is required as the Synopsys 140 * Enable writing to config regs. This is required as the Synopsys
140 * driver changes the class code. That register needs DBI write enable. 141 * driver changes the class code. That register needs DBI write enable.
141 */ 142 */
142 dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN); 143 dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
143 144
144 pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR; 145 pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
145 pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR; 146 pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
@@ -155,19 +156,20 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
155 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); 156 artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
156 157
157 /* check if the link is up or not */ 158 /* check if the link is up or not */
158 if (!dw_pcie_wait_for_link(pp)) 159 if (!dw_pcie_wait_for_link(pci))
159 return 0; 160 return 0;
160 161
161 dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", 162 dev_dbg(pci->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
162 dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0), 163 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
163 dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1)); 164 dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
164 165
165 return -ETIMEDOUT; 166 return -ETIMEDOUT;
166} 167}
167 168
168static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) 169static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
169{ 170{
170 struct pcie_port *pp = &artpec6_pcie->pp; 171 struct dw_pcie *pci = artpec6_pcie->pci;
172 struct pcie_port *pp = &pci->pp;
171 173
172 if (IS_ENABLED(CONFIG_PCI_MSI)) 174 if (IS_ENABLED(CONFIG_PCI_MSI))
173 dw_pcie_msi_init(pp); 175 dw_pcie_msi_init(pp);
@@ -175,20 +177,22 @@ static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
175 177
176static void artpec6_pcie_host_init(struct pcie_port *pp) 178static void artpec6_pcie_host_init(struct pcie_port *pp)
177{ 179{
178 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp); 180 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
181 struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
179 182
180 artpec6_pcie_establish_link(artpec6_pcie); 183 artpec6_pcie_establish_link(artpec6_pcie);
181 artpec6_pcie_enable_interrupts(artpec6_pcie); 184 artpec6_pcie_enable_interrupts(artpec6_pcie);
182} 185}
183 186
184static struct pcie_host_ops artpec6_pcie_host_ops = { 187static struct dw_pcie_host_ops artpec6_pcie_host_ops = {
185 .host_init = artpec6_pcie_host_init, 188 .host_init = artpec6_pcie_host_init,
186}; 189};
187 190
188static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg) 191static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
189{ 192{
190 struct artpec6_pcie *artpec6_pcie = arg; 193 struct artpec6_pcie *artpec6_pcie = arg;
191 struct pcie_port *pp = &artpec6_pcie->pp; 194 struct dw_pcie *pci = artpec6_pcie->pci;
195 struct pcie_port *pp = &pci->pp;
192 196
193 return dw_handle_msi_irq(pp); 197 return dw_handle_msi_irq(pp);
194} 198}
@@ -196,8 +200,9 @@ static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
196static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, 200static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
197 struct platform_device *pdev) 201 struct platform_device *pdev)
198{ 202{
199 struct pcie_port *pp = &artpec6_pcie->pp; 203 struct dw_pcie *pci = artpec6_pcie->pci;
200 struct device *dev = pp->dev; 204 struct pcie_port *pp = &pci->pp;
205 struct device *dev = pci->dev;
201 int ret; 206 int ret;
202 207
203 if (IS_ENABLED(CONFIG_PCI_MSI)) { 208 if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -232,8 +237,8 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
232static int artpec6_pcie_probe(struct platform_device *pdev) 237static int artpec6_pcie_probe(struct platform_device *pdev)
233{ 238{
234 struct device *dev = &pdev->dev; 239 struct device *dev = &pdev->dev;
240 struct dw_pcie *pci;
235 struct artpec6_pcie *artpec6_pcie; 241 struct artpec6_pcie *artpec6_pcie;
236 struct pcie_port *pp;
237 struct resource *dbi_base; 242 struct resource *dbi_base;
238 struct resource *phy_base; 243 struct resource *phy_base;
239 int ret; 244 int ret;
@@ -242,13 +247,16 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
242 if (!artpec6_pcie) 247 if (!artpec6_pcie)
243 return -ENOMEM; 248 return -ENOMEM;
244 249
245 pp = &artpec6_pcie->pp; 250 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
246 pp->dev = dev; 251 if (!pci)
252 return -ENOMEM;
253
254 pci->dev = dev;
247 255
248 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 256 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
249 pp->dbi_base = devm_ioremap_resource(dev, dbi_base); 257 pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
250 if (IS_ERR(pp->dbi_base)) 258 if (IS_ERR(pci->dbi_base))
251 return PTR_ERR(pp->dbi_base); 259 return PTR_ERR(pci->dbi_base);
252 260
253 phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); 261 phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
254 artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); 262 artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
@@ -261,6 +269,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
261 if (IS_ERR(artpec6_pcie->regmap)) 269 if (IS_ERR(artpec6_pcie->regmap))
262 return PTR_ERR(artpec6_pcie->regmap); 270 return PTR_ERR(artpec6_pcie->regmap);
263 271
272 platform_set_drvdata(pdev, artpec6_pcie);
273
264 ret = artpec6_add_pcie_port(artpec6_pcie, pdev); 274 ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
265 if (ret < 0) 275 if (ret < 0)
266 return ret; 276 return ret;
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/dwc/pcie-designware-host.c
index bed19994c1e9..5ba334938b52 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -11,239 +11,38 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/irq.h>
15#include <linux/irqdomain.h> 14#include <linux/irqdomain.h>
16#include <linux/kernel.h>
17#include <linux/msi.h>
18#include <linux/of_address.h> 15#include <linux/of_address.h>
19#include <linux/of_pci.h> 16#include <linux/of_pci.h>
20#include <linux/pci.h>
21#include <linux/pci_regs.h> 17#include <linux/pci_regs.h>
22#include <linux/platform_device.h> 18#include <linux/platform_device.h>
23#include <linux/types.h>
24#include <linux/delay.h>
25 19
26#include "pcie-designware.h" 20#include "pcie-designware.h"
27 21
28/* Parameters for the waiting for link up routine */
29#define LINK_WAIT_MAX_RETRIES 10
30#define LINK_WAIT_USLEEP_MIN 90000
31#define LINK_WAIT_USLEEP_MAX 100000
32
33/* Parameters for the waiting for iATU enabled routine */
34#define LINK_WAIT_MAX_IATU_RETRIES 5
35#define LINK_WAIT_IATU_MIN 9000
36#define LINK_WAIT_IATU_MAX 10000
37
38/* Synopsys-specific PCIe configuration registers */
39#define PCIE_PORT_LINK_CONTROL 0x710
40#define PORT_LINK_MODE_MASK (0x3f << 16)
41#define PORT_LINK_MODE_1_LANES (0x1 << 16)
42#define PORT_LINK_MODE_2_LANES (0x3 << 16)
43#define PORT_LINK_MODE_4_LANES (0x7 << 16)
44#define PORT_LINK_MODE_8_LANES (0xf << 16)
45
46#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
47#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
48#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
49#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
50#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
51#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
52#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
53
54#define PCIE_MSI_ADDR_LO 0x820
55#define PCIE_MSI_ADDR_HI 0x824
56#define PCIE_MSI_INTR0_ENABLE 0x828
57#define PCIE_MSI_INTR0_MASK 0x82C
58#define PCIE_MSI_INTR0_STATUS 0x830
59
60#define PCIE_ATU_VIEWPORT 0x900
61#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
62#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
63#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
64#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
65#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
66#define PCIE_ATU_CR1 0x904
67#define PCIE_ATU_TYPE_MEM (0x0 << 0)
68#define PCIE_ATU_TYPE_IO (0x2 << 0)
69#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
70#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
71#define PCIE_ATU_CR2 0x908
72#define PCIE_ATU_ENABLE (0x1 << 31)
73#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
74#define PCIE_ATU_LOWER_BASE 0x90C
75#define PCIE_ATU_UPPER_BASE 0x910
76#define PCIE_ATU_LIMIT 0x914
77#define PCIE_ATU_LOWER_TARGET 0x918
78#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
79#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
80#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
81#define PCIE_ATU_UPPER_TARGET 0x91C
82
83/*
84 * iATU Unroll-specific register definitions
85 * From 4.80 core version the address translation will be made by unroll
86 */
87#define PCIE_ATU_UNR_REGION_CTRL1 0x00
88#define PCIE_ATU_UNR_REGION_CTRL2 0x04
89#define PCIE_ATU_UNR_LOWER_BASE 0x08
90#define PCIE_ATU_UNR_UPPER_BASE 0x0C
91#define PCIE_ATU_UNR_LIMIT 0x10
92#define PCIE_ATU_UNR_LOWER_TARGET 0x14
93#define PCIE_ATU_UNR_UPPER_TARGET 0x18
94
95/* Register address builder */
96#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((0x3 << 20) | (region << 9))
97
98/* PCIe Port Logic registers */
99#define PLR_OFFSET 0x700
100#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
101#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
102#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
103
104static struct pci_ops dw_pcie_ops; 22static struct pci_ops dw_pcie_ops;
105 23
106int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
107{
108 if ((uintptr_t)addr & (size - 1)) {
109 *val = 0;
110 return PCIBIOS_BAD_REGISTER_NUMBER;
111 }
112
113 if (size == 4)
114 *val = readl(addr);
115 else if (size == 2)
116 *val = readw(addr);
117 else if (size == 1)
118 *val = readb(addr);
119 else {
120 *val = 0;
121 return PCIBIOS_BAD_REGISTER_NUMBER;
122 }
123
124 return PCIBIOS_SUCCESSFUL;
125}
126
127int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
128{
129 if ((uintptr_t)addr & (size - 1))
130 return PCIBIOS_BAD_REGISTER_NUMBER;
131
132 if (size == 4)
133 writel(val, addr);
134 else if (size == 2)
135 writew(val, addr);
136 else if (size == 1)
137 writeb(val, addr);
138 else
139 return PCIBIOS_BAD_REGISTER_NUMBER;
140
141 return PCIBIOS_SUCCESSFUL;
142}
143
144u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
145{
146 if (pp->ops->readl_rc)
147 return pp->ops->readl_rc(pp, reg);
148
149 return readl(pp->dbi_base + reg);
150}
151
152void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
153{
154 if (pp->ops->writel_rc)
155 pp->ops->writel_rc(pp, reg, val);
156 else
157 writel(val, pp->dbi_base + reg);
158}
159
160static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
161{
162 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
163
164 return dw_pcie_readl_rc(pp, offset + reg);
165}
166
167static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
168 u32 val)
169{
170 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
171
172 dw_pcie_writel_rc(pp, offset + reg, val);
173}
174
175static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, 24static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
176 u32 *val) 25 u32 *val)
177{ 26{
27 struct dw_pcie *pci;
28
178 if (pp->ops->rd_own_conf) 29 if (pp->ops->rd_own_conf)
179 return pp->ops->rd_own_conf(pp, where, size, val); 30 return pp->ops->rd_own_conf(pp, where, size, val);
180 31
181 return dw_pcie_cfg_read(pp->dbi_base + where, size, val); 32 pci = to_dw_pcie_from_pp(pp);
33 return dw_pcie_read(pci->dbi_base + where, size, val);
182} 34}
183 35
184static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, 36static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
185 u32 val) 37 u32 val)
186{ 38{
39 struct dw_pcie *pci;
40
187 if (pp->ops->wr_own_conf) 41 if (pp->ops->wr_own_conf)
188 return pp->ops->wr_own_conf(pp, where, size, val); 42 return pp->ops->wr_own_conf(pp, where, size, val);
189 43
190 return dw_pcie_cfg_write(pp->dbi_base + where, size, val); 44 pci = to_dw_pcie_from_pp(pp);
191} 45 return dw_pcie_write(pci->dbi_base + where, size, val);
192
193static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
194 int type, u64 cpu_addr, u64 pci_addr, u32 size)
195{
196 u32 retries, val;
197
198 if (pp->iatu_unroll_enabled) {
199 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
200 lower_32_bits(cpu_addr));
201 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
202 upper_32_bits(cpu_addr));
203 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
204 lower_32_bits(cpu_addr + size - 1));
205 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
206 lower_32_bits(pci_addr));
207 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
208 upper_32_bits(pci_addr));
209 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
210 type);
211 dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
212 PCIE_ATU_ENABLE);
213 } else {
214 dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
215 PCIE_ATU_REGION_OUTBOUND | index);
216 dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
217 lower_32_bits(cpu_addr));
218 dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
219 upper_32_bits(cpu_addr));
220 dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
221 lower_32_bits(cpu_addr + size - 1));
222 dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
223 lower_32_bits(pci_addr));
224 dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
225 upper_32_bits(pci_addr));
226 dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
227 dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
228 }
229
230 /*
231 * Make sure ATU enable takes effect before any subsequent config
232 * and I/O accesses.
233 */
234 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
235 if (pp->iatu_unroll_enabled)
236 val = dw_pcie_readl_unroll(pp, index,
237 PCIE_ATU_UNR_REGION_CTRL2);
238 else
239 val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);
240
241 if (val == PCIE_ATU_ENABLE)
242 return;
243
244 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
245 }
246 dev_err(pp->dev, "iATU is not being enabled\n");
247} 46}
248 47
249static struct irq_chip dw_msi_irq_chip = { 48static struct irq_chip dw_msi_irq_chip = {
@@ -263,16 +62,15 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
263 62
264 for (i = 0; i < MAX_MSI_CTRLS; i++) { 63 for (i = 0; i < MAX_MSI_CTRLS; i++) {
265 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, 64 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
266 (u32 *)&val); 65 (u32 *)&val);
267 if (val) { 66 if (val) {
268 ret = IRQ_HANDLED; 67 ret = IRQ_HANDLED;
269 pos = 0; 68 pos = 0;
270 while ((pos = find_next_bit(&val, 32, pos)) != 32) { 69 while ((pos = find_next_bit(&val, 32, pos)) != 32) {
271 irq = irq_find_mapping(pp->irq_domain, 70 irq = irq_find_mapping(pp->irq_domain,
272 i * 32 + pos); 71 i * 32 + pos);
273 dw_pcie_wr_own_conf(pp, 72 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
274 PCIE_MSI_INTR0_STATUS + i * 12, 73 i * 12, 4, 1 << pos);
275 4, 1 << pos);
276 generic_handle_irq(irq); 74 generic_handle_irq(irq);
277 pos++; 75 pos++;
278 } 76 }
@@ -338,8 +136,9 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
338static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) 136static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
339{ 137{
340 int irq, pos0, i; 138 int irq, pos0, i;
341 struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc); 139 struct pcie_port *pp;
342 140
141 pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
343 pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, 142 pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
344 order_base_2(no_irqs)); 143 order_base_2(no_irqs));
345 if (pos0 < 0) 144 if (pos0 < 0)
@@ -401,7 +200,7 @@ static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
401} 200}
402 201
403static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, 202static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
404 struct msi_desc *desc) 203 struct msi_desc *desc)
405{ 204{
406 int irq, pos; 205 int irq, pos;
407 struct pcie_port *pp = pdev->bus->sysdata; 206 struct pcie_port *pp = pdev->bus->sysdata;
@@ -449,7 +248,7 @@ static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
449{ 248{
450 struct irq_data *data = irq_get_irq_data(irq); 249 struct irq_data *data = irq_get_irq_data(irq);
451 struct msi_desc *msi = irq_data_get_msi_desc(data); 250 struct msi_desc *msi = irq_data_get_msi_desc(data);
452 struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); 251 struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);
453 252
454 clear_irq_range(pp, irq, 1, data->hwirq); 253 clear_irq_range(pp, irq, 1, data->hwirq);
455} 254}
@@ -460,38 +259,8 @@ static struct msi_controller dw_pcie_msi_chip = {
460 .teardown_irq = dw_msi_teardown_irq, 259 .teardown_irq = dw_msi_teardown_irq,
461}; 260};
462 261
463int dw_pcie_wait_for_link(struct pcie_port *pp)
464{
465 int retries;
466
467 /* check if the link is up or not */
468 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
469 if (dw_pcie_link_up(pp)) {
470 dev_info(pp->dev, "link up\n");
471 return 0;
472 }
473 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
474 }
475
476 dev_err(pp->dev, "phy link never came up\n");
477
478 return -ETIMEDOUT;
479}
480
481int dw_pcie_link_up(struct pcie_port *pp)
482{
483 u32 val;
484
485 if (pp->ops->link_up)
486 return pp->ops->link_up(pp);
487
488 val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
489 return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
490 (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
491}
492
493static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, 262static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
494 irq_hw_number_t hwirq) 263 irq_hw_number_t hwirq)
495{ 264{
496 irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq); 265 irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
497 irq_set_chip_data(irq, domain->host_data); 266 irq_set_chip_data(irq, domain->host_data);
@@ -503,21 +272,12 @@ static const struct irq_domain_ops msi_domain_ops = {
503 .map = dw_pcie_msi_map, 272 .map = dw_pcie_msi_map,
504}; 273};
505 274
506static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
507{
508 u32 val;
509
510 val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
511 if (val == 0xffffffff)
512 return 1;
513
514 return 0;
515}
516
517int dw_pcie_host_init(struct pcie_port *pp) 275int dw_pcie_host_init(struct pcie_port *pp)
518{ 276{
519 struct device_node *np = pp->dev->of_node; 277 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
520 struct platform_device *pdev = to_platform_device(pp->dev); 278 struct device *dev = pci->dev;
279 struct device_node *np = dev->of_node;
280 struct platform_device *pdev = to_platform_device(dev);
521 struct pci_bus *bus, *child; 281 struct pci_bus *bus, *child;
522 struct resource *cfg_res; 282 struct resource *cfg_res;
523 int i, ret; 283 int i, ret;
@@ -526,19 +286,19 @@ int dw_pcie_host_init(struct pcie_port *pp)
526 286
527 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); 287 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
528 if (cfg_res) { 288 if (cfg_res) {
529 pp->cfg0_size = resource_size(cfg_res)/2; 289 pp->cfg0_size = resource_size(cfg_res) / 2;
530 pp->cfg1_size = resource_size(cfg_res)/2; 290 pp->cfg1_size = resource_size(cfg_res) / 2;
531 pp->cfg0_base = cfg_res->start; 291 pp->cfg0_base = cfg_res->start;
532 pp->cfg1_base = cfg_res->start + pp->cfg0_size; 292 pp->cfg1_base = cfg_res->start + pp->cfg0_size;
533 } else if (!pp->va_cfg0_base) { 293 } else if (!pp->va_cfg0_base) {
534 dev_err(pp->dev, "missing *config* reg space\n"); 294 dev_err(dev, "missing *config* reg space\n");
535 } 295 }
536 296
537 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); 297 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
538 if (ret) 298 if (ret)
539 return ret; 299 return ret;
540 300
541 ret = devm_request_pci_bus_resources(&pdev->dev, &res); 301 ret = devm_request_pci_bus_resources(dev, &res);
542 if (ret) 302 if (ret)
543 goto error; 303 goto error;
544 304
@@ -548,7 +308,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
548 case IORESOURCE_IO: 308 case IORESOURCE_IO:
549 ret = pci_remap_iospace(win->res, pp->io_base); 309 ret = pci_remap_iospace(win->res, pp->io_base);
550 if (ret) { 310 if (ret) {
551 dev_warn(pp->dev, "error %d: failed to map resource %pR\n", 311 dev_warn(dev, "error %d: failed to map resource %pR\n",
552 ret, win->res); 312 ret, win->res);
553 resource_list_destroy_entry(win); 313 resource_list_destroy_entry(win);
554 } else { 314 } else {
@@ -566,8 +326,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
566 break; 326 break;
567 case 0: 327 case 0:
568 pp->cfg = win->res; 328 pp->cfg = win->res;
569 pp->cfg0_size = resource_size(pp->cfg)/2; 329 pp->cfg0_size = resource_size(pp->cfg) / 2;
570 pp->cfg1_size = resource_size(pp->cfg)/2; 330 pp->cfg1_size = resource_size(pp->cfg) / 2;
571 pp->cfg0_base = pp->cfg->start; 331 pp->cfg0_base = pp->cfg->start;
572 pp->cfg1_base = pp->cfg->start + pp->cfg0_size; 332 pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
573 break; 333 break;
@@ -577,11 +337,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
577 } 337 }
578 } 338 }
579 339
580 if (!pp->dbi_base) { 340 if (!pci->dbi_base) {
581 pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start, 341 pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
582 resource_size(pp->cfg)); 342 resource_size(pp->cfg));
583 if (!pp->dbi_base) { 343 if (!pci->dbi_base) {
584 dev_err(pp->dev, "error with ioremap\n"); 344 dev_err(dev, "error with ioremap\n");
585 ret = -ENOMEM; 345 ret = -ENOMEM;
586 goto error; 346 goto error;
587 } 347 }
@@ -590,40 +350,36 @@ int dw_pcie_host_init(struct pcie_port *pp)
590 pp->mem_base = pp->mem->start; 350 pp->mem_base = pp->mem->start;
591 351
592 if (!pp->va_cfg0_base) { 352 if (!pp->va_cfg0_base) {
593 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, 353 pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
594 pp->cfg0_size); 354 pp->cfg0_size);
595 if (!pp->va_cfg0_base) { 355 if (!pp->va_cfg0_base) {
596 dev_err(pp->dev, "error with ioremap in function\n"); 356 dev_err(dev, "error with ioremap in function\n");
597 ret = -ENOMEM; 357 ret = -ENOMEM;
598 goto error; 358 goto error;
599 } 359 }
600 } 360 }
601 361
602 if (!pp->va_cfg1_base) { 362 if (!pp->va_cfg1_base) {
603 pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base, 363 pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
604 pp->cfg1_size); 364 pp->cfg1_size);
605 if (!pp->va_cfg1_base) { 365 if (!pp->va_cfg1_base) {
606 dev_err(pp->dev, "error with ioremap\n"); 366 dev_err(dev, "error with ioremap\n");
607 ret = -ENOMEM; 367 ret = -ENOMEM;
608 goto error; 368 goto error;
609 } 369 }
610 } 370 }
611 371
612 ret = of_property_read_u32(np, "num-lanes", &pp->lanes); 372 ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
613 if (ret) 373 if (ret)
614 pp->lanes = 0; 374 pci->num_viewport = 2;
615
616 ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
617 if (ret)
618 pp->num_viewport = 2;
619 375
620 if (IS_ENABLED(CONFIG_PCI_MSI)) { 376 if (IS_ENABLED(CONFIG_PCI_MSI)) {
621 if (!pp->ops->msi_host_init) { 377 if (!pp->ops->msi_host_init) {
622 pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, 378 pp->irq_domain = irq_domain_add_linear(dev->of_node,
623 MAX_MSI_IRQS, &msi_domain_ops, 379 MAX_MSI_IRQS, &msi_domain_ops,
624 &dw_pcie_msi_chip); 380 &dw_pcie_msi_chip);
625 if (!pp->irq_domain) { 381 if (!pp->irq_domain) {
626 dev_err(pp->dev, "irq domain init failed\n"); 382 dev_err(dev, "irq domain init failed\n");
627 ret = -ENXIO; 383 ret = -ENXIO;
628 goto error; 384 goto error;
629 } 385 }
@@ -642,12 +398,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
642 398
643 pp->root_bus_nr = pp->busn->start; 399 pp->root_bus_nr = pp->busn->start;
644 if (IS_ENABLED(CONFIG_PCI_MSI)) { 400 if (IS_ENABLED(CONFIG_PCI_MSI)) {
645 bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr, 401 bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
646 &dw_pcie_ops, pp, &res, 402 &dw_pcie_ops, pp, &res,
647 &dw_pcie_msi_chip); 403 &dw_pcie_msi_chip);
648 dw_pcie_msi_chip.dev = pp->dev; 404 dw_pcie_msi_chip.dev = dev;
649 } else 405 } else
650 bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, 406 bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
651 pp, &res); 407 pp, &res);
652 if (!bus) { 408 if (!bus) {
653 ret = -ENOMEM; 409 ret = -ENOMEM;
@@ -677,12 +433,13 @@ error:
677} 433}
678 434
679static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 435static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
680 u32 devfn, int where, int size, u32 *val) 436 u32 devfn, int where, int size, u32 *val)
681{ 437{
682 int ret, type; 438 int ret, type;
683 u32 busdev, cfg_size; 439 u32 busdev, cfg_size;
684 u64 cpu_addr; 440 u64 cpu_addr;
685 void __iomem *va_cfg_base; 441 void __iomem *va_cfg_base;
442 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
686 443
687 if (pp->ops->rd_other_conf) 444 if (pp->ops->rd_other_conf)
688 return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); 445 return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
@@ -702,12 +459,12 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
702 va_cfg_base = pp->va_cfg1_base; 459 va_cfg_base = pp->va_cfg1_base;
703 } 460 }
704 461
705 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, 462 dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
706 type, cpu_addr, 463 type, cpu_addr,
707 busdev, cfg_size); 464 busdev, cfg_size);
708 ret = dw_pcie_cfg_read(va_cfg_base + where, size, val); 465 ret = dw_pcie_read(va_cfg_base + where, size, val);
709 if (pp->num_viewport <= 2) 466 if (pci->num_viewport <= 2)
710 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, 467 dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
711 PCIE_ATU_TYPE_IO, pp->io_base, 468 PCIE_ATU_TYPE_IO, pp->io_base,
712 pp->io_bus_addr, pp->io_size); 469 pp->io_bus_addr, pp->io_size);
713 470
@@ -715,12 +472,13 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
715} 472}
716 473
717static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, 474static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
718 u32 devfn, int where, int size, u32 val) 475 u32 devfn, int where, int size, u32 val)
719{ 476{
720 int ret, type; 477 int ret, type;
721 u32 busdev, cfg_size; 478 u32 busdev, cfg_size;
722 u64 cpu_addr; 479 u64 cpu_addr;
723 void __iomem *va_cfg_base; 480 void __iomem *va_cfg_base;
481 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
724 482
725 if (pp->ops->wr_other_conf) 483 if (pp->ops->wr_other_conf)
726 return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); 484 return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
@@ -740,12 +498,12 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
740 va_cfg_base = pp->va_cfg1_base; 498 va_cfg_base = pp->va_cfg1_base;
741 } 499 }
742 500
743 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, 501 dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
744 type, cpu_addr, 502 type, cpu_addr,
745 busdev, cfg_size); 503 busdev, cfg_size);
746 ret = dw_pcie_cfg_write(va_cfg_base + where, size, val); 504 ret = dw_pcie_write(va_cfg_base + where, size, val);
747 if (pp->num_viewport <= 2) 505 if (pci->num_viewport <= 2)
748 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, 506 dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
749 PCIE_ATU_TYPE_IO, pp->io_base, 507 PCIE_ATU_TYPE_IO, pp->io_base,
750 pp->io_bus_addr, pp->io_size); 508 pp->io_bus_addr, pp->io_size);
751 509
@@ -755,9 +513,11 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
755static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, 513static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
756 int dev) 514 int dev)
757{ 515{
516 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
517
758 /* If there is no link, then there is no device */ 518 /* If there is no link, then there is no device */
759 if (bus->number != pp->root_bus_nr) { 519 if (bus->number != pp->root_bus_nr) {
760 if (!dw_pcie_link_up(pp)) 520 if (!dw_pcie_link_up(pci))
761 return 0; 521 return 0;
762 } 522 }
763 523
@@ -769,7 +529,7 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
769} 529}
770 530
771static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, 531static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
772 int size, u32 *val) 532 int size, u32 *val)
773{ 533{
774 struct pcie_port *pp = bus->sysdata; 534 struct pcie_port *pp = bus->sysdata;
775 535
@@ -785,7 +545,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
785} 545}
786 546
787static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, 547static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
788 int where, int size, u32 val) 548 int where, int size, u32 val)
789{ 549{
790 struct pcie_port *pp = bus->sysdata; 550 struct pcie_port *pp = bus->sysdata;
791 551
@@ -803,78 +563,46 @@ static struct pci_ops dw_pcie_ops = {
803 .write = dw_pcie_wr_conf, 563 .write = dw_pcie_wr_conf,
804}; 564};
805 565
566static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
567{
568 u32 val;
569
570 val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
571 if (val == 0xffffffff)
572 return 1;
573
574 return 0;
575}
576
806void dw_pcie_setup_rc(struct pcie_port *pp) 577void dw_pcie_setup_rc(struct pcie_port *pp)
807{ 578{
808 u32 val; 579 u32 val;
580 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
809 581
810 /* get iATU unroll support */ 582 dw_pcie_setup(pci);
811 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
812 dev_dbg(pp->dev, "iATU unroll: %s\n",
813 pp->iatu_unroll_enabled ? "enabled" : "disabled");
814
815 /* set the number of lanes */
816 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
817 val &= ~PORT_LINK_MODE_MASK;
818 switch (pp->lanes) {
819 case 1:
820 val |= PORT_LINK_MODE_1_LANES;
821 break;
822 case 2:
823 val |= PORT_LINK_MODE_2_LANES;
824 break;
825 case 4:
826 val |= PORT_LINK_MODE_4_LANES;
827 break;
828 case 8:
829 val |= PORT_LINK_MODE_8_LANES;
830 break;
831 default:
832 dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
833 return;
834 }
835 dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
836
837 /* set link width speed control register */
838 val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
839 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
840 switch (pp->lanes) {
841 case 1:
842 val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
843 break;
844 case 2:
845 val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
846 break;
847 case 4:
848 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
849 break;
850 case 8:
851 val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
852 break;
853 }
854 dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
855 583
856 /* setup RC BARs */ 584 /* setup RC BARs */
857 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004); 585 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
858 dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000); 586 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
859 587
860 /* setup interrupt pins */ 588 /* setup interrupt pins */
861 val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE); 589 val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
862 val &= 0xffff00ff; 590 val &= 0xffff00ff;
863 val |= 0x00000100; 591 val |= 0x00000100;
864 dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val); 592 dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
865 593
866 /* setup bus numbers */ 594 /* setup bus numbers */
867 val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS); 595 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
868 val &= 0xff000000; 596 val &= 0xff000000;
869 val |= 0x00010100; 597 val |= 0x00010100;
870 dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val); 598 dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
871 599
872 /* setup command register */ 600 /* setup command register */
873 val = dw_pcie_readl_rc(pp, PCI_COMMAND); 601 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
874 val &= 0xffff0000; 602 val &= 0xffff0000;
875 val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 603 val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
876 PCI_COMMAND_MASTER | PCI_COMMAND_SERR; 604 PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
877 dw_pcie_writel_rc(pp, PCI_COMMAND, val); 605 dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
878 606
879 /* 607 /*
880 * If the platform provides ->rd_other_conf, it means the platform 608 * If the platform provides ->rd_other_conf, it means the platform
@@ -882,11 +610,16 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
882 * we should not program the ATU here. 610 * we should not program the ATU here.
883 */ 611 */
884 if (!pp->ops->rd_other_conf) { 612 if (!pp->ops->rd_other_conf) {
885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 613 /* get iATU unroll support */
614 pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
615 dev_dbg(pci->dev, "iATU unroll: %s\n",
616 pci->iatu_unroll_enabled ? "enabled" : "disabled");
617
618 dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
886 PCIE_ATU_TYPE_MEM, pp->mem_base, 619 PCIE_ATU_TYPE_MEM, pp->mem_base,
887 pp->mem_bus_addr, pp->mem_size); 620 pp->mem_bus_addr, pp->mem_size);
888 if (pp->num_viewport > 2) 621 if (pci->num_viewport > 2)
889 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2, 622 dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
890 PCIE_ATU_TYPE_IO, pp->io_base, 623 PCIE_ATU_TYPE_IO, pp->io_base,
891 pp->io_bus_addr, pp->io_size); 624 pp->io_bus_addr, pp->io_size);
892 } 625 }
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 1a02038c4640..65250f63515c 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -25,7 +25,7 @@
25#include "pcie-designware.h" 25#include "pcie-designware.h"
26 26
27struct dw_plat_pcie { 27struct dw_plat_pcie {
28 struct pcie_port pp; /* pp.dbi_base is DT 0th resource */ 28 struct dw_pcie *pci;
29}; 29};
30 30
31static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg) 31static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
@@ -37,21 +37,23 @@ static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
37 37
38static void dw_plat_pcie_host_init(struct pcie_port *pp) 38static void dw_plat_pcie_host_init(struct pcie_port *pp)
39{ 39{
40 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
41
40 dw_pcie_setup_rc(pp); 42 dw_pcie_setup_rc(pp);
41 dw_pcie_wait_for_link(pp); 43 dw_pcie_wait_for_link(pci);
42 44
43 if (IS_ENABLED(CONFIG_PCI_MSI)) 45 if (IS_ENABLED(CONFIG_PCI_MSI))
44 dw_pcie_msi_init(pp); 46 dw_pcie_msi_init(pp);
45} 47}
46 48
47static struct pcie_host_ops dw_plat_pcie_host_ops = { 49static struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
48 .host_init = dw_plat_pcie_host_init, 50 .host_init = dw_plat_pcie_host_init,
49}; 51};
50 52
51static int dw_plat_add_pcie_port(struct pcie_port *pp, 53static int dw_plat_add_pcie_port(struct pcie_port *pp,
52 struct platform_device *pdev) 54 struct platform_device *pdev)
53{ 55{
54 struct device *dev = pp->dev; 56 struct device *dev = &pdev->dev;
55 int ret; 57 int ret;
56 58
57 pp->irq = platform_get_irq(pdev, 1); 59 pp->irq = platform_get_irq(pdev, 1);
@@ -88,7 +90,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
88{ 90{
89 struct device *dev = &pdev->dev; 91 struct device *dev = &pdev->dev;
90 struct dw_plat_pcie *dw_plat_pcie; 92 struct dw_plat_pcie *dw_plat_pcie;
91 struct pcie_port *pp; 93 struct dw_pcie *pci;
92 struct resource *res; /* Resource from DT */ 94 struct resource *res; /* Resource from DT */
93 int ret; 95 int ret;
94 96
@@ -96,15 +98,20 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
96 if (!dw_plat_pcie) 98 if (!dw_plat_pcie)
97 return -ENOMEM; 99 return -ENOMEM;
98 100
99 pp = &dw_plat_pcie->pp; 101 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
100 pp->dev = dev; 102 if (!pci)
103 return -ENOMEM;
104
105 pci->dev = dev;
101 106
102 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 107 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
103 pp->dbi_base = devm_ioremap_resource(dev, res); 108 pci->dbi_base = devm_ioremap_resource(dev, res);
104 if (IS_ERR(pp->dbi_base)) 109 if (IS_ERR(pci->dbi_base))
105 return PTR_ERR(pp->dbi_base); 110 return PTR_ERR(pci->dbi_base);
111
112 platform_set_drvdata(pdev, dw_plat_pcie);
106 113
107 ret = dw_plat_add_pcie_port(pp, pdev); 114 ret = dw_plat_add_pcie_port(&pci->pp, pdev);
108 if (ret < 0) 115 if (ret < 0)
109 return ret; 116 return ret;
110 117
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
new file mode 100644
index 000000000000..7e1fb7d6643c
--- /dev/null
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -0,0 +1,233 @@
1/*
2 * Synopsys Designware PCIe host controller driver
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/delay.h>
15#include <linux/of.h>
16#include <linux/types.h>
17
18#include "pcie-designware.h"
19
20/* PCIe Port Logic registers */
21#define PLR_OFFSET 0x700
22#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
23#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
24#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
25
26int dw_pcie_read(void __iomem *addr, int size, u32 *val)
27{
28 if ((uintptr_t)addr & (size - 1)) {
29 *val = 0;
30 return PCIBIOS_BAD_REGISTER_NUMBER;
31 }
32
33 if (size == 4) {
34 *val = readl(addr);
35 } else if (size == 2) {
36 *val = readw(addr);
37 } else if (size == 1) {
38 *val = readb(addr);
39 } else {
40 *val = 0;
41 return PCIBIOS_BAD_REGISTER_NUMBER;
42 }
43
44 return PCIBIOS_SUCCESSFUL;
45}
46
47int dw_pcie_write(void __iomem *addr, int size, u32 val)
48{
49 if ((uintptr_t)addr & (size - 1))
50 return PCIBIOS_BAD_REGISTER_NUMBER;
51
52 if (size == 4)
53 writel(val, addr);
54 else if (size == 2)
55 writew(val, addr);
56 else if (size == 1)
57 writeb(val, addr);
58 else
59 return PCIBIOS_BAD_REGISTER_NUMBER;
60
61 return PCIBIOS_SUCCESSFUL;
62}
63
64u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
65{
66 if (pci->ops->readl_dbi)
67 return pci->ops->readl_dbi(pci, reg);
68
69 return readl(pci->dbi_base + reg);
70}
71
72void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
73{
74 if (pci->ops->writel_dbi)
75 pci->ops->writel_dbi(pci, reg, val);
76 else
77 writel(val, pci->dbi_base + reg);
78}
79
80static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg)
81{
82 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
83
84 return dw_pcie_readl_dbi(pci, offset + reg);
85}
86
87static void dw_pcie_writel_unroll(struct dw_pcie *pci, u32 index, u32 reg,
88 u32 val)
89{
90 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
91
92 dw_pcie_writel_dbi(pci, offset + reg, val);
93}
94
95void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
96 u64 cpu_addr, u64 pci_addr, u32 size)
97{
98 u32 retries, val;
99
100 if (pci->iatu_unroll_enabled) {
101 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
102 lower_32_bits(cpu_addr));
103 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
104 upper_32_bits(cpu_addr));
105 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
106 lower_32_bits(cpu_addr + size - 1));
107 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
108 lower_32_bits(pci_addr));
109 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
110 upper_32_bits(pci_addr));
111 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
112 type);
113 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
114 PCIE_ATU_ENABLE);
115 } else {
116 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
117 PCIE_ATU_REGION_OUTBOUND | index);
118 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
119 lower_32_bits(cpu_addr));
120 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
121 upper_32_bits(cpu_addr));
122 dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
123 lower_32_bits(cpu_addr + size - 1));
124 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
125 lower_32_bits(pci_addr));
126 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
127 upper_32_bits(pci_addr));
128 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
129 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
130 }
131
132 /*
133 * Make sure ATU enable takes effect before any subsequent config
134 * and I/O accesses.
135 */
136 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
137 if (pci->iatu_unroll_enabled)
138 val = dw_pcie_readl_unroll(pci, index,
139 PCIE_ATU_UNR_REGION_CTRL2);
140 else
141 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
142
143 if (val == PCIE_ATU_ENABLE)
144 return;
145
146 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
147 }
148 dev_err(pci->dev, "iATU is not being enabled\n");
149}
150
151int dw_pcie_wait_for_link(struct dw_pcie *pci)
152{
153 int retries;
154
155 /* check if the link is up or not */
156 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
157 if (dw_pcie_link_up(pci)) {
158 dev_info(pci->dev, "link up\n");
159 return 0;
160 }
161 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
162 }
163
164 dev_err(pci->dev, "phy link never came up\n");
165
166 return -ETIMEDOUT;
167}
168
169int dw_pcie_link_up(struct dw_pcie *pci)
170{
171 u32 val;
172
173 if (pci->ops->link_up)
174 return pci->ops->link_up(pci);
175
176 val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
177 return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
178 (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
179}
180
181void dw_pcie_setup(struct dw_pcie *pci)
182{
183 int ret;
184 u32 val;
185 u32 lanes;
186 struct device *dev = pci->dev;
187 struct device_node *np = dev->of_node;
188
189 ret = of_property_read_u32(np, "num-lanes", &lanes);
190 if (ret)
191 lanes = 0;
192
193 /* set the number of lanes */
194 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
195 val &= ~PORT_LINK_MODE_MASK;
196 switch (lanes) {
197 case 1:
198 val |= PORT_LINK_MODE_1_LANES;
199 break;
200 case 2:
201 val |= PORT_LINK_MODE_2_LANES;
202 break;
203 case 4:
204 val |= PORT_LINK_MODE_4_LANES;
205 break;
206 case 8:
207 val |= PORT_LINK_MODE_8_LANES;
208 break;
209 default:
210 dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
211 return;
212 }
213 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
214
215 /* set link width speed control register */
216 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
217 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
218 switch (lanes) {
219 case 1:
220 val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
221 break;
222 case 2:
223 val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
224 break;
225 case 4:
226 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
227 break;
228 case 8:
229 val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
230 break;
231 }
232 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
233}
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
new file mode 100644
index 000000000000..cd3b8713fe50
--- /dev/null
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -0,0 +1,198 @@
1/*
2 * Synopsys Designware PCIe host controller driver
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef _PCIE_DESIGNWARE_H
15#define _PCIE_DESIGNWARE_H
16
17#include <linux/irq.h>
18#include <linux/msi.h>
19#include <linux/pci.h>
20
21/* Parameters for the waiting for link up routine */
22#define LINK_WAIT_MAX_RETRIES 10
23#define LINK_WAIT_USLEEP_MIN 90000
24#define LINK_WAIT_USLEEP_MAX 100000
25
26/* Parameters for the waiting for iATU enabled routine */
27#define LINK_WAIT_MAX_IATU_RETRIES 5
28#define LINK_WAIT_IATU_MIN 9000
29#define LINK_WAIT_IATU_MAX 10000
30
31/* Synopsys-specific PCIe configuration registers */
32#define PCIE_PORT_LINK_CONTROL 0x710
33#define PORT_LINK_MODE_MASK (0x3f << 16)
34#define PORT_LINK_MODE_1_LANES (0x1 << 16)
35#define PORT_LINK_MODE_2_LANES (0x3 << 16)
36#define PORT_LINK_MODE_4_LANES (0x7 << 16)
37#define PORT_LINK_MODE_8_LANES (0xf << 16)
38
39#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
40#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
41#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
42#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
43#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
44#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
45#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
46
47#define PCIE_MSI_ADDR_LO 0x820
48#define PCIE_MSI_ADDR_HI 0x824
49#define PCIE_MSI_INTR0_ENABLE 0x828
50#define PCIE_MSI_INTR0_MASK 0x82C
51#define PCIE_MSI_INTR0_STATUS 0x830
52
53#define PCIE_ATU_VIEWPORT 0x900
54#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
55#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
56#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
57#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
58#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
59#define PCIE_ATU_CR1 0x904
60#define PCIE_ATU_TYPE_MEM (0x0 << 0)
61#define PCIE_ATU_TYPE_IO (0x2 << 0)
62#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
63#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
64#define PCIE_ATU_CR2 0x908
65#define PCIE_ATU_ENABLE (0x1 << 31)
66#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
67#define PCIE_ATU_LOWER_BASE 0x90C
68#define PCIE_ATU_UPPER_BASE 0x910
69#define PCIE_ATU_LIMIT 0x914
70#define PCIE_ATU_LOWER_TARGET 0x918
71#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
72#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
73#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
74#define PCIE_ATU_UPPER_TARGET 0x91C
75
76/*
77 * iATU Unroll-specific register definitions
78 * From 4.80 core version the address translation will be made by unroll
79 */
80#define PCIE_ATU_UNR_REGION_CTRL1 0x00
81#define PCIE_ATU_UNR_REGION_CTRL2 0x04
82#define PCIE_ATU_UNR_LOWER_BASE 0x08
83#define PCIE_ATU_UNR_UPPER_BASE 0x0C
84#define PCIE_ATU_UNR_LIMIT 0x10
85#define PCIE_ATU_UNR_LOWER_TARGET 0x14
86#define PCIE_ATU_UNR_UPPER_TARGET 0x18
87
88/* Register address builder */
89#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
90 ((0x3 << 20) | ((region) << 9))
91
92/*
93 * Maximum number of MSI IRQs can be 256 per controller. But keep
94 * it 32 as of now. Probably we will never need more than 32. If needed,
95 * then increment it in multiple of 32.
96 */
97#define MAX_MSI_IRQS 32
98#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
99
100struct pcie_port;
101struct dw_pcie;
102
103struct dw_pcie_host_ops {
104 int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
105 int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
106 int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
107 unsigned int devfn, int where, int size, u32 *val);
108 int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
109 unsigned int devfn, int where, int size, u32 val);
110 void (*host_init)(struct pcie_port *pp);
111 void (*msi_set_irq)(struct pcie_port *pp, int irq);
112 void (*msi_clear_irq)(struct pcie_port *pp, int irq);
113 phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
114 u32 (*get_msi_data)(struct pcie_port *pp, int pos);
115 void (*scan_bus)(struct pcie_port *pp);
116 int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
117};
118
119struct pcie_port {
120 u8 root_bus_nr;
121 u64 cfg0_base;
122 void __iomem *va_cfg0_base;
123 u32 cfg0_size;
124 u64 cfg1_base;
125 void __iomem *va_cfg1_base;
126 u32 cfg1_size;
127 resource_size_t io_base;
128 phys_addr_t io_bus_addr;
129 u32 io_size;
130 u64 mem_base;
131 phys_addr_t mem_bus_addr;
132 u32 mem_size;
133 struct resource *cfg;
134 struct resource *io;
135 struct resource *mem;
136 struct resource *busn;
137 int irq;
138 struct dw_pcie_host_ops *ops;
139 int msi_irq;
140 struct irq_domain *irq_domain;
141 unsigned long msi_data;
142 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
143};
144
145struct dw_pcie_ops {
146 u32 (*readl_dbi)(struct dw_pcie *pcie, u32 reg);
147 void (*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val);
148 int (*link_up)(struct dw_pcie *pcie);
149};
150
151struct dw_pcie {
152 struct device *dev;
153 void __iomem *dbi_base;
154 u32 num_viewport;
155 u8 iatu_unroll_enabled;
156 struct pcie_port pp;
157 const struct dw_pcie_ops *ops;
158};
159
160#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
161
162int dw_pcie_read(void __iomem *addr, int size, u32 *val);
163int dw_pcie_write(void __iomem *addr, int size, u32 val);
164
165u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg);
166void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val);
167int dw_pcie_link_up(struct dw_pcie *pci);
168int dw_pcie_wait_for_link(struct dw_pcie *pci);
169void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
170 int type, u64 cpu_addr, u64 pci_addr,
171 u32 size);
172void dw_pcie_setup(struct dw_pcie *pci);
173
174#ifdef CONFIG_PCIE_DW_HOST
175irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
176void dw_pcie_msi_init(struct pcie_port *pp);
177void dw_pcie_setup_rc(struct pcie_port *pp);
178int dw_pcie_host_init(struct pcie_port *pp);
179#else
180static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
181{
182 return IRQ_NONE;
183}
184
185static inline void dw_pcie_msi_init(struct pcie_port *pp)
186{
187}
188
189static inline void dw_pcie_setup_rc(struct pcie_port *pp)
190{
191}
192
193static inline int dw_pcie_host_init(struct pcie_port *pp)
194{
195 return 0;
196}
197#endif
198#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index a301a7187b30..e3e4fedd9f68 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -24,10 +24,10 @@
24#include <linux/regmap.h> 24#include <linux/regmap.h>
25#include "../pci.h" 25#include "../pci.h"
26 26
27#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) 27#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
28 28
29static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where, 29static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
30 int size, u32 *val) 30 int size, u32 *val)
31{ 31{
32 struct pci_config_window *cfg = bus->sysdata; 32 struct pci_config_window *cfg = bus->sysdata;
33 int dev = PCI_SLOT(devfn); 33 int dev = PCI_SLOT(devfn);
@@ -44,8 +44,8 @@ static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where,
44 return pci_generic_config_read(bus, devfn, where, size, val); 44 return pci_generic_config_read(bus, devfn, where, size, val);
45} 45}
46 46
47static int hisi_pcie_acpi_wr_conf(struct pci_bus *bus, u32 devfn, 47static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
48 int where, int size, u32 val) 48 int where, int size, u32 val)
49{ 49{
50 struct pci_config_window *cfg = bus->sysdata; 50 struct pci_config_window *cfg = bus->sysdata;
51 int dev = PCI_SLOT(devfn); 51 int dev = PCI_SLOT(devfn);
@@ -74,6 +74,8 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
74 return pci_ecam_map_bus(bus, devfn, where); 74 return pci_ecam_map_bus(bus, devfn, where);
75} 75}
76 76
77#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
78
77static int hisi_pcie_init(struct pci_config_window *cfg) 79static int hisi_pcie_init(struct pci_config_window *cfg)
78{ 80{
79 struct device *dev = cfg->parent; 81 struct device *dev = cfg->parent;
@@ -110,8 +112,8 @@ struct pci_ecam_ops hisi_pcie_ops = {
110 .init = hisi_pcie_init, 112 .init = hisi_pcie_init,
111 .pci_ops = { 113 .pci_ops = {
112 .map_bus = hisi_pcie_map_bus, 114 .map_bus = hisi_pcie_map_bus,
113 .read = hisi_pcie_acpi_rd_conf, 115 .read = hisi_pcie_rd_conf,
114 .write = hisi_pcie_acpi_wr_conf, 116 .write = hisi_pcie_wr_conf,
115 } 117 }
116}; 118};
117 119
@@ -127,7 +129,7 @@ struct pci_ecam_ops hisi_pcie_ops = {
127#define PCIE_LTSSM_LINKUP_STATE 0x11 129#define PCIE_LTSSM_LINKUP_STATE 0x11
128#define PCIE_LTSSM_STATE_MASK 0x3F 130#define PCIE_LTSSM_STATE_MASK 0x3F
129 131
130#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp) 132#define to_hisi_pcie(x) dev_get_drvdata((x)->dev)
131 133
132struct hisi_pcie; 134struct hisi_pcie;
133 135
@@ -136,10 +138,10 @@ struct pcie_soc_ops {
136}; 138};
137 139
138struct hisi_pcie { 140struct hisi_pcie {
139 struct pcie_port pp; /* pp.dbi_base is DT rc_dbi */ 141 struct dw_pcie *pci;
140 struct regmap *subctrl; 142 struct regmap *subctrl;
141 u32 port_id; 143 u32 port_id;
142 struct pcie_soc_ops *soc_ops; 144 const struct pcie_soc_ops *soc_ops;
143}; 145};
144 146
145/* HipXX PCIe host only supports 32-bit config access */ 147/* HipXX PCIe host only supports 32-bit config access */
@@ -149,10 +151,11 @@ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
149 u32 reg; 151 u32 reg;
150 u32 reg_val; 152 u32 reg_val;
151 void *walker = &reg_val; 153 void *walker = &reg_val;
154 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
152 155
153 walker += (where & 0x3); 156 walker += (where & 0x3);
154 reg = where & ~0x3; 157 reg = where & ~0x3;
155 reg_val = dw_pcie_readl_rc(pp, reg); 158 reg_val = dw_pcie_readl_dbi(pci, reg);
156 159
157 if (size == 1) 160 if (size == 1)
158 *val = *(u8 __force *) walker; 161 *val = *(u8 __force *) walker;
@@ -173,19 +176,20 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
173 u32 reg_val; 176 u32 reg_val;
174 u32 reg; 177 u32 reg;
175 void *walker = &reg_val; 178 void *walker = &reg_val;
179 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
176 180
177 walker += (where & 0x3); 181 walker += (where & 0x3);
178 reg = where & ~0x3; 182 reg = where & ~0x3;
179 if (size == 4) 183 if (size == 4)
180 dw_pcie_writel_rc(pp, reg, val); 184 dw_pcie_writel_dbi(pci, reg, val);
181 else if (size == 2) { 185 else if (size == 2) {
182 reg_val = dw_pcie_readl_rc(pp, reg); 186 reg_val = dw_pcie_readl_dbi(pci, reg);
183 *(u16 __force *) walker = val; 187 *(u16 __force *) walker = val;
184 dw_pcie_writel_rc(pp, reg, reg_val); 188 dw_pcie_writel_dbi(pci, reg, reg_val);
185 } else if (size == 1) { 189 } else if (size == 1) {
186 reg_val = dw_pcie_readl_rc(pp, reg); 190 reg_val = dw_pcie_readl_dbi(pci, reg);
187 *(u8 __force *) walker = val; 191 *(u8 __force *) walker = val;
188 dw_pcie_writel_rc(pp, reg, reg_val); 192 dw_pcie_writel_dbi(pci, reg, reg_val);
189 } else 193 } else
190 return PCIBIOS_BAD_REGISTER_NUMBER; 194 return PCIBIOS_BAD_REGISTER_NUMBER;
191 195
@@ -204,32 +208,32 @@ static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
204 208
205static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) 209static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
206{ 210{
207 struct pcie_port *pp = &hisi_pcie->pp; 211 struct dw_pcie *pci = hisi_pcie->pci;
208 u32 val; 212 u32 val;
209 213
210 val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4); 214 val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4);
211 215
212 return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); 216 return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
213} 217}
214 218
215static int hisi_pcie_link_up(struct pcie_port *pp) 219static int hisi_pcie_link_up(struct dw_pcie *pci)
216{ 220{
217 struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp); 221 struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci);
218 222
219 return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); 223 return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
220} 224}
221 225
222static struct pcie_host_ops hisi_pcie_host_ops = { 226static struct dw_pcie_host_ops hisi_pcie_host_ops = {
223 .rd_own_conf = hisi_pcie_cfg_read, 227 .rd_own_conf = hisi_pcie_cfg_read,
224 .wr_own_conf = hisi_pcie_cfg_write, 228 .wr_own_conf = hisi_pcie_cfg_write,
225 .link_up = hisi_pcie_link_up,
226}; 229};
227 230
228static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, 231static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
229 struct platform_device *pdev) 232 struct platform_device *pdev)
230{ 233{
231 struct pcie_port *pp = &hisi_pcie->pp; 234 struct dw_pcie *pci = hisi_pcie->pci;
232 struct device *dev = pp->dev; 235 struct pcie_port *pp = &pci->pp;
236 struct device *dev = &pdev->dev;
233 int ret; 237 int ret;
234 u32 port_id; 238 u32 port_id;
235 239
@@ -254,12 +258,15 @@ static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
254 return 0; 258 return 0;
255} 259}
256 260
261static const struct dw_pcie_ops dw_pcie_ops = {
262 .link_up = hisi_pcie_link_up,
263};
264
257static int hisi_pcie_probe(struct platform_device *pdev) 265static int hisi_pcie_probe(struct platform_device *pdev)
258{ 266{
259 struct device *dev = &pdev->dev; 267 struct device *dev = &pdev->dev;
268 struct dw_pcie *pci;
260 struct hisi_pcie *hisi_pcie; 269 struct hisi_pcie *hisi_pcie;
261 struct pcie_port *pp;
262 const struct of_device_id *match;
263 struct resource *reg; 270 struct resource *reg;
264 struct device_driver *driver; 271 struct device_driver *driver;
265 int ret; 272 int ret;
@@ -268,24 +275,30 @@ static int hisi_pcie_probe(struct platform_device *pdev)
268 if (!hisi_pcie) 275 if (!hisi_pcie)
269 return -ENOMEM; 276 return -ENOMEM;
270 277
271 pp = &hisi_pcie->pp; 278 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
272 pp->dev = dev; 279 if (!pci)
280 return -ENOMEM;
281
282 pci->dev = dev;
283 pci->ops = &dw_pcie_ops;
284
273 driver = dev->driver; 285 driver = dev->driver;
274 286
275 match = of_match_device(driver->of_match_table, dev); 287 hisi_pcie->soc_ops = of_device_get_match_data(dev);
276 hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
277 288
278 hisi_pcie->subctrl = 289 hisi_pcie->subctrl =
279 syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); 290 syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
280 if (IS_ERR(hisi_pcie->subctrl)) { 291 if (IS_ERR(hisi_pcie->subctrl)) {
281 dev_err(dev, "cannot get subctrl base\n"); 292 dev_err(dev, "cannot get subctrl base\n");
282 return PTR_ERR(hisi_pcie->subctrl); 293 return PTR_ERR(hisi_pcie->subctrl);
283 } 294 }
284 295
285 reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); 296 reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
286 pp->dbi_base = devm_ioremap_resource(dev, reg); 297 pci->dbi_base = devm_ioremap_resource(dev, reg);
287 if (IS_ERR(pp->dbi_base)) 298 if (IS_ERR(pci->dbi_base))
288 return PTR_ERR(pp->dbi_base); 299 return PTR_ERR(pci->dbi_base);
300
301 platform_set_drvdata(pdev, hisi_pcie);
289 302
290 ret = hisi_add_pcie_port(hisi_pcie, pdev); 303 ret = hisi_add_pcie_port(hisi_pcie, pdev);
291 if (ret) 304 if (ret)
@@ -323,4 +336,62 @@ static struct platform_driver hisi_pcie_driver = {
323}; 336};
324builtin_platform_driver(hisi_pcie_driver); 337builtin_platform_driver(hisi_pcie_driver);
325 338
339static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
340{
341 struct device *dev = &pdev->dev;
342 struct pci_ecam_ops *ops;
343
344 ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
345 return pci_host_common_probe(pdev, ops);
346}
347
348static int hisi_pcie_platform_init(struct pci_config_window *cfg)
349{
350 struct device *dev = cfg->parent;
351 struct platform_device *pdev = to_platform_device(dev);
352 struct resource *res;
353 void __iomem *reg_base;
354
355 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
356 if (!res) {
357 dev_err(dev, "missing \"reg[1]\"property\n");
358 return -EINVAL;
359 }
360
361 reg_base = devm_ioremap(dev, res->start, resource_size(res));
362 if (!reg_base)
363 return -ENOMEM;
364
365 cfg->priv = reg_base;
366 return 0;
367}
368
369struct pci_ecam_ops hisi_pcie_platform_ops = {
370 .bus_shift = 20,
371 .init = hisi_pcie_platform_init,
372 .pci_ops = {
373 .map_bus = hisi_pcie_map_bus,
374 .read = hisi_pcie_rd_conf,
375 .write = hisi_pcie_wr_conf,
376 }
377};
378
379static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
380 {
381 .compatible = "hisilicon,pcie-almost-ecam",
382 .data = (void *) &hisi_pcie_platform_ops,
383 },
384 {},
385};
386
387static struct platform_driver hisi_pcie_almost_ecam_driver = {
388 .probe = hisi_pcie_almost_ecam_probe,
389 .driver = {
390 .name = "hisi-pcie-almost-ecam",
391 .of_match_table = hisi_pcie_almost_ecam_of_match,
392 },
393};
394builtin_platform_driver(hisi_pcie_almost_ecam_driver);
395
396#endif
326#endif 397#endif
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 734ba0d4a5c8..e36abe0d9d6f 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -103,7 +103,7 @@ struct qcom_pcie_ops {
103}; 103};
104 104
105struct qcom_pcie { 105struct qcom_pcie {
106 struct pcie_port pp; /* pp.dbi_base is DT dbi */ 106 struct dw_pcie *pci;
107 void __iomem *parf; /* DT parf */ 107 void __iomem *parf; /* DT parf */
108 void __iomem *elbi; /* DT elbi */ 108 void __iomem *elbi; /* DT elbi */
109 union qcom_pcie_resources res; 109 union qcom_pcie_resources res;
@@ -112,7 +112,7 @@ struct qcom_pcie {
112 struct qcom_pcie_ops *ops; 112 struct qcom_pcie_ops *ops;
113}; 113};
114 114
115#define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp) 115#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
116 116
117static void qcom_ep_reset_assert(struct qcom_pcie *pcie) 117static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
118{ 118{
@@ -155,21 +155,23 @@ static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
155 155
156static int qcom_pcie_establish_link(struct qcom_pcie *pcie) 156static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
157{ 157{
158 struct dw_pcie *pci = pcie->pci;
158 159
159 if (dw_pcie_link_up(&pcie->pp)) 160 if (dw_pcie_link_up(pci))
160 return 0; 161 return 0;
161 162
162 /* Enable Link Training state machine */ 163 /* Enable Link Training state machine */
163 if (pcie->ops->ltssm_enable) 164 if (pcie->ops->ltssm_enable)
164 pcie->ops->ltssm_enable(pcie); 165 pcie->ops->ltssm_enable(pcie);
165 166
166 return dw_pcie_wait_for_link(&pcie->pp); 167 return dw_pcie_wait_for_link(pci);
167} 168}
168 169
169static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) 170static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
170{ 171{
171 struct qcom_pcie_resources_v0 *res = &pcie->res.v0; 172 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
172 struct device *dev = pcie->pp.dev; 173 struct dw_pcie *pci = pcie->pci;
174 struct device *dev = pci->dev;
173 175
174 res->vdda = devm_regulator_get(dev, "vdda"); 176 res->vdda = devm_regulator_get(dev, "vdda");
175 if (IS_ERR(res->vdda)) 177 if (IS_ERR(res->vdda))
@@ -212,16 +214,14 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
212 return PTR_ERR(res->por_reset); 214 return PTR_ERR(res->por_reset);
213 215
214 res->phy_reset = devm_reset_control_get(dev, "phy"); 216 res->phy_reset = devm_reset_control_get(dev, "phy");
215 if (IS_ERR(res->phy_reset)) 217 return PTR_ERR_OR_ZERO(res->phy_reset);
216 return PTR_ERR(res->phy_reset);
217
218 return 0;
219} 218}
220 219
221static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) 220static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
222{ 221{
223 struct qcom_pcie_resources_v1 *res = &pcie->res.v1; 222 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
224 struct device *dev = pcie->pp.dev; 223 struct dw_pcie *pci = pcie->pci;
224 struct device *dev = pci->dev;
225 225
226 res->vdda = devm_regulator_get(dev, "vdda"); 226 res->vdda = devm_regulator_get(dev, "vdda");
227 if (IS_ERR(res->vdda)) 227 if (IS_ERR(res->vdda))
@@ -244,10 +244,7 @@ static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
244 return PTR_ERR(res->slave_bus); 244 return PTR_ERR(res->slave_bus);
245 245
246 res->core = devm_reset_control_get(dev, "core"); 246 res->core = devm_reset_control_get(dev, "core");
247 if (IS_ERR(res->core)) 247 return PTR_ERR_OR_ZERO(res->core);
248 return PTR_ERR(res->core);
249
250 return 0;
251} 248}
252 249
253static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) 250static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
@@ -270,7 +267,8 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
270static int qcom_pcie_init_v0(struct qcom_pcie *pcie) 267static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
271{ 268{
272 struct qcom_pcie_resources_v0 *res = &pcie->res.v0; 269 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
273 struct device *dev = pcie->pp.dev; 270 struct dw_pcie *pci = pcie->pci;
271 struct device *dev = pci->dev;
274 u32 val; 272 u32 val;
275 int ret; 273 int ret;
276 274
@@ -392,7 +390,8 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
392static int qcom_pcie_init_v1(struct qcom_pcie *pcie) 390static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
393{ 391{
394 struct qcom_pcie_resources_v1 *res = &pcie->res.v1; 392 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
395 struct device *dev = pcie->pp.dev; 393 struct dw_pcie *pci = pcie->pci;
394 struct device *dev = pci->dev;
396 int ret; 395 int ret;
397 396
398 ret = reset_control_deassert(res->core); 397 ret = reset_control_deassert(res->core);
@@ -459,7 +458,8 @@ err_res:
459static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) 458static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
460{ 459{
461 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 460 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
462 struct device *dev = pcie->pp.dev; 461 struct dw_pcie *pci = pcie->pci;
462 struct device *dev = pci->dev;
463 463
464 res->aux_clk = devm_clk_get(dev, "aux"); 464 res->aux_clk = devm_clk_get(dev, "aux");
465 if (IS_ERR(res->aux_clk)) 465 if (IS_ERR(res->aux_clk))
@@ -478,16 +478,14 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
478 return PTR_ERR(res->slave_clk); 478 return PTR_ERR(res->slave_clk);
479 479
480 res->pipe_clk = devm_clk_get(dev, "pipe"); 480 res->pipe_clk = devm_clk_get(dev, "pipe");
481 if (IS_ERR(res->pipe_clk)) 481 return PTR_ERR_OR_ZERO(res->pipe_clk);
482 return PTR_ERR(res->pipe_clk);
483
484 return 0;
485} 482}
486 483
487static int qcom_pcie_init_v2(struct qcom_pcie *pcie) 484static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
488{ 485{
489 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 486 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
490 struct device *dev = pcie->pp.dev; 487 struct dw_pcie *pci = pcie->pci;
488 struct device *dev = pci->dev;
491 u32 val; 489 u32 val;
492 int ret; 490 int ret;
493 491
@@ -551,7 +549,8 @@ err_cfg_clk:
551static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) 549static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
552{ 550{
553 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 551 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
554 struct device *dev = pcie->pp.dev; 552 struct dw_pcie *pci = pcie->pci;
553 struct device *dev = pci->dev;
555 int ret; 554 int ret;
556 555
557 ret = clk_prepare_enable(res->pipe_clk); 556 ret = clk_prepare_enable(res->pipe_clk);
@@ -563,10 +562,9 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
563 return 0; 562 return 0;
564} 563}
565 564
566static int qcom_pcie_link_up(struct pcie_port *pp) 565static int qcom_pcie_link_up(struct dw_pcie *pci)
567{ 566{
568 struct qcom_pcie *pcie = to_qcom_pcie(pp); 567 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
569 u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
570 568
571 return !!(val & PCI_EXP_LNKSTA_DLLLA); 569 return !!(val & PCI_EXP_LNKSTA_DLLLA);
572} 570}
@@ -584,7 +582,8 @@ static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
584 582
585static void qcom_pcie_host_init(struct pcie_port *pp) 583static void qcom_pcie_host_init(struct pcie_port *pp)
586{ 584{
587 struct qcom_pcie *pcie = to_qcom_pcie(pp); 585 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
586 struct qcom_pcie *pcie = to_qcom_pcie(pci);
588 int ret; 587 int ret;
589 588
590 qcom_ep_reset_assert(pcie); 589 qcom_ep_reset_assert(pcie);
@@ -622,19 +621,20 @@ err_deinit:
622static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, 621static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
623 u32 *val) 622 u32 *val)
624{ 623{
624 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
625
625 /* the device class is not reported correctly from the register */ 626 /* the device class is not reported correctly from the register */
626 if (where == PCI_CLASS_REVISION && size == 4) { 627 if (where == PCI_CLASS_REVISION && size == 4) {
627 *val = readl(pp->dbi_base + PCI_CLASS_REVISION); 628 *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
628 *val &= 0xff; /* keep revision id */ 629 *val &= 0xff; /* keep revision id */
629 *val |= PCI_CLASS_BRIDGE_PCI << 16; 630 *val |= PCI_CLASS_BRIDGE_PCI << 16;
630 return PCIBIOS_SUCCESSFUL; 631 return PCIBIOS_SUCCESSFUL;
631 } 632 }
632 633
633 return dw_pcie_cfg_read(pp->dbi_base + where, size, val); 634 return dw_pcie_read(pci->dbi_base + where, size, val);
634} 635}
635 636
636static struct pcie_host_ops qcom_pcie_dw_ops = { 637static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
637 .link_up = qcom_pcie_link_up,
638 .host_init = qcom_pcie_host_init, 638 .host_init = qcom_pcie_host_init,
639 .rd_own_conf = qcom_pcie_rd_own_conf, 639 .rd_own_conf = qcom_pcie_rd_own_conf,
640}; 640};
@@ -661,19 +661,31 @@ static const struct qcom_pcie_ops ops_v2 = {
661 .ltssm_enable = qcom_pcie_v2_ltssm_enable, 661 .ltssm_enable = qcom_pcie_v2_ltssm_enable,
662}; 662};
663 663
664static const struct dw_pcie_ops dw_pcie_ops = {
665 .link_up = qcom_pcie_link_up,
666};
667
664static int qcom_pcie_probe(struct platform_device *pdev) 668static int qcom_pcie_probe(struct platform_device *pdev)
665{ 669{
666 struct device *dev = &pdev->dev; 670 struct device *dev = &pdev->dev;
667 struct resource *res; 671 struct resource *res;
668 struct qcom_pcie *pcie;
669 struct pcie_port *pp; 672 struct pcie_port *pp;
673 struct dw_pcie *pci;
674 struct qcom_pcie *pcie;
670 int ret; 675 int ret;
671 676
672 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 677 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
673 if (!pcie) 678 if (!pcie)
674 return -ENOMEM; 679 return -ENOMEM;
675 680
676 pp = &pcie->pp; 681 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
682 if (!pci)
683 return -ENOMEM;
684
685 pci->dev = dev;
686 pci->ops = &dw_pcie_ops;
687 pp = &pci->pp;
688
677 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev); 689 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
678 690
679 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); 691 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
@@ -686,9 +698,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
686 return PTR_ERR(pcie->parf); 698 return PTR_ERR(pcie->parf);
687 699
688 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 700 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
689 pp->dbi_base = devm_ioremap_resource(dev, res); 701 pci->dbi_base = devm_ioremap_resource(dev, res);
690 if (IS_ERR(pp->dbi_base)) 702 if (IS_ERR(pci->dbi_base))
691 return PTR_ERR(pp->dbi_base); 703 return PTR_ERR(pci->dbi_base);
692 704
693 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); 705 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
694 pcie->elbi = devm_ioremap_resource(dev, res); 706 pcie->elbi = devm_ioremap_resource(dev, res);
@@ -699,7 +711,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
699 if (IS_ERR(pcie->phy)) 711 if (IS_ERR(pcie->phy))
700 return PTR_ERR(pcie->phy); 712 return PTR_ERR(pcie->phy);
701 713
702 pp->dev = dev;
703 ret = pcie->ops->get_resources(pcie); 714 ret = pcie->ops->get_resources(pcie);
704 if (ret) 715 if (ret)
705 return ret; 716 return ret;
@@ -725,6 +736,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
725 if (ret) 736 if (ret)
726 return ret; 737 return ret;
727 738
739 platform_set_drvdata(pdev, pcie);
740
728 ret = dw_pcie_host_init(pp); 741 ret = dw_pcie_host_init(pp);
729 if (ret) { 742 if (ret) {
730 dev_err(dev, "cannot initialize host\n"); 743 dev_err(dev, "cannot initialize host\n");
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index dafe8b88d97d..348f9c5e0433 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -25,7 +25,7 @@
25#include "pcie-designware.h" 25#include "pcie-designware.h"
26 26
27struct spear13xx_pcie { 27struct spear13xx_pcie {
28 struct pcie_port pp; /* DT dbi is pp.dbi_base */ 28 struct dw_pcie *pci;
29 void __iomem *app_base; 29 void __iomem *app_base;
30 struct phy *phy; 30 struct phy *phy;
31 struct clk *clk; 31 struct clk *clk;
@@ -70,17 +70,18 @@ struct pcie_app_reg {
70 70
71#define EXP_CAP_ID_OFFSET 0x70 71#define EXP_CAP_ID_OFFSET 0x70
72 72
73#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp) 73#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
74 74
75static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie) 75static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
76{ 76{
77 struct pcie_port *pp = &spear13xx_pcie->pp; 77 struct dw_pcie *pci = spear13xx_pcie->pci;
78 struct pcie_port *pp = &pci->pp;
78 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; 79 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
79 u32 val; 80 u32 val;
80 u32 exp_cap_off = EXP_CAP_ID_OFFSET; 81 u32 exp_cap_off = EXP_CAP_ID_OFFSET;
81 82
82 if (dw_pcie_link_up(pp)) { 83 if (dw_pcie_link_up(pci)) {
83 dev_err(pp->dev, "link already up\n"); 84 dev_err(pci->dev, "link already up\n");
84 return 0; 85 return 0;
85 } 86 }
86 87
@@ -91,34 +92,34 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
91 * default value in capability register is 512 bytes. So force 92 * default value in capability register is 512 bytes. So force
92 * it to 128 here. 93 * it to 128 here.
93 */ 94 */
94 dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val); 95 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
95 val &= ~PCI_EXP_DEVCTL_READRQ; 96 val &= ~PCI_EXP_DEVCTL_READRQ;
96 dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val); 97 dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
97 98
98 dw_pcie_cfg_write(pp->dbi_base + PCI_VENDOR_ID, 2, 0x104A); 99 dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
99 dw_pcie_cfg_write(pp->dbi_base + PCI_DEVICE_ID, 2, 0xCD80); 100 dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
100 101
101 /* 102 /*
102 * if is_gen1 is set then handle it, so that some buggy card 103 * if is_gen1 is set then handle it, so that some buggy card
103 * also works 104 * also works
104 */ 105 */
105 if (spear13xx_pcie->is_gen1) { 106 if (spear13xx_pcie->is_gen1) {
106 dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, 107 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
107 4, &val); 108 4, &val);
108 if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { 109 if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
109 val &= ~((u32)PCI_EXP_LNKCAP_SLS); 110 val &= ~((u32)PCI_EXP_LNKCAP_SLS);
110 val |= PCI_EXP_LNKCAP_SLS_2_5GB; 111 val |= PCI_EXP_LNKCAP_SLS_2_5GB;
111 dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + 112 dw_pcie_write(pci->dbi_base + exp_cap_off +
112 PCI_EXP_LNKCAP, 4, val); 113 PCI_EXP_LNKCAP, 4, val);
113 } 114 }
114 115
115 dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, 116 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
116 2, &val); 117 2, &val);
117 if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { 118 if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
118 val &= ~((u32)PCI_EXP_LNKCAP_SLS); 119 val &= ~((u32)PCI_EXP_LNKCAP_SLS);
119 val |= PCI_EXP_LNKCAP_SLS_2_5GB; 120 val |= PCI_EXP_LNKCAP_SLS_2_5GB;
120 dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + 121 dw_pcie_write(pci->dbi_base + exp_cap_off +
121 PCI_EXP_LNKCTL2, 2, val); 122 PCI_EXP_LNKCTL2, 2, val);
122 } 123 }
123 } 124 }
124 125
@@ -128,14 +129,15 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
128 | ((u32)1 << REG_TRANSLATION_ENABLE), 129 | ((u32)1 << REG_TRANSLATION_ENABLE),
129 &app_reg->app_ctrl_0); 130 &app_reg->app_ctrl_0);
130 131
131 return dw_pcie_wait_for_link(pp); 132 return dw_pcie_wait_for_link(pci);
132} 133}
133 134
134static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) 135static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
135{ 136{
136 struct spear13xx_pcie *spear13xx_pcie = arg; 137 struct spear13xx_pcie *spear13xx_pcie = arg;
137 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; 138 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
138 struct pcie_port *pp = &spear13xx_pcie->pp; 139 struct dw_pcie *pci = spear13xx_pcie->pci;
140 struct pcie_port *pp = &pci->pp;
139 unsigned int status; 141 unsigned int status;
140 142
141 status = readl(&app_reg->int_sts); 143 status = readl(&app_reg->int_sts);
@@ -152,7 +154,8 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
152 154
153static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie) 155static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
154{ 156{
155 struct pcie_port *pp = &spear13xx_pcie->pp; 157 struct dw_pcie *pci = spear13xx_pcie->pci;
158 struct pcie_port *pp = &pci->pp;
156 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; 159 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
157 160
158 /* Enable MSI interrupt */ 161 /* Enable MSI interrupt */
@@ -163,9 +166,9 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
163 } 166 }
164} 167}
165 168
166static int spear13xx_pcie_link_up(struct pcie_port *pp) 169static int spear13xx_pcie_link_up(struct dw_pcie *pci)
167{ 170{
168 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); 171 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
169 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; 172 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
170 173
171 if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) 174 if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
@@ -176,22 +179,23 @@ static int spear13xx_pcie_link_up(struct pcie_port *pp)
176 179
177static void spear13xx_pcie_host_init(struct pcie_port *pp) 180static void spear13xx_pcie_host_init(struct pcie_port *pp)
178{ 181{
179 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); 182 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
183 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
180 184
181 spear13xx_pcie_establish_link(spear13xx_pcie); 185 spear13xx_pcie_establish_link(spear13xx_pcie);
182 spear13xx_pcie_enable_interrupts(spear13xx_pcie); 186 spear13xx_pcie_enable_interrupts(spear13xx_pcie);
183} 187}
184 188
185static struct pcie_host_ops spear13xx_pcie_host_ops = { 189static struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
186 .link_up = spear13xx_pcie_link_up,
187 .host_init = spear13xx_pcie_host_init, 190 .host_init = spear13xx_pcie_host_init,
188}; 191};
189 192
190static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, 193static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
191 struct platform_device *pdev) 194 struct platform_device *pdev)
192{ 195{
193 struct pcie_port *pp = &spear13xx_pcie->pp; 196 struct dw_pcie *pci = spear13xx_pcie->pci;
194 struct device *dev = pp->dev; 197 struct pcie_port *pp = &pci->pp;
198 struct device *dev = &pdev->dev;
195 int ret; 199 int ret;
196 200
197 pp->irq = platform_get_irq(pdev, 0); 201 pp->irq = platform_get_irq(pdev, 0);
@@ -219,11 +223,15 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
219 return 0; 223 return 0;
220} 224}
221 225
226static const struct dw_pcie_ops dw_pcie_ops = {
227 .link_up = spear13xx_pcie_link_up,
228};
229
222static int spear13xx_pcie_probe(struct platform_device *pdev) 230static int spear13xx_pcie_probe(struct platform_device *pdev)
223{ 231{
224 struct device *dev = &pdev->dev; 232 struct device *dev = &pdev->dev;
233 struct dw_pcie *pci;
225 struct spear13xx_pcie *spear13xx_pcie; 234 struct spear13xx_pcie *spear13xx_pcie;
226 struct pcie_port *pp;
227 struct device_node *np = dev->of_node; 235 struct device_node *np = dev->of_node;
228 struct resource *dbi_base; 236 struct resource *dbi_base;
229 int ret; 237 int ret;
@@ -232,6 +240,13 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
232 if (!spear13xx_pcie) 240 if (!spear13xx_pcie)
233 return -ENOMEM; 241 return -ENOMEM;
234 242
243 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
244 if (!pci)
245 return -ENOMEM;
246
247 pci->dev = dev;
248 pci->ops = &dw_pcie_ops;
249
235 spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); 250 spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
236 if (IS_ERR(spear13xx_pcie->phy)) { 251 if (IS_ERR(spear13xx_pcie->phy)) {
237 ret = PTR_ERR(spear13xx_pcie->phy); 252 ret = PTR_ERR(spear13xx_pcie->phy);
@@ -255,26 +270,24 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
255 return ret; 270 return ret;
256 } 271 }
257 272
258 pp = &spear13xx_pcie->pp;
259 pp->dev = dev;
260
261 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 273 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
262 pp->dbi_base = devm_ioremap_resource(dev, dbi_base); 274 pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
263 if (IS_ERR(pp->dbi_base)) { 275 if (IS_ERR(pci->dbi_base)) {
264 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); 276 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
265 ret = PTR_ERR(pp->dbi_base); 277 ret = PTR_ERR(pci->dbi_base);
266 goto fail_clk; 278 goto fail_clk;
267 } 279 }
268 spear13xx_pcie->app_base = pp->dbi_base + 0x2000; 280 spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
269 281
270 if (of_property_read_bool(np, "st,pcie-is-gen1")) 282 if (of_property_read_bool(np, "st,pcie-is-gen1"))
271 spear13xx_pcie->is_gen1 = true; 283 spear13xx_pcie->is_gen1 = true;
272 284
285 platform_set_drvdata(pdev, spear13xx_pcie);
286
273 ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); 287 ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
274 if (ret < 0) 288 if (ret < 0)
275 goto fail_clk; 289 goto fail_clk;
276 290
277 platform_set_drvdata(pdev, spear13xx_pcie);
278 return 0; 291 return 0;
279 292
280fail_clk: 293fail_clk:
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 898d2c48239c..f7c1d4d5c665 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -1,16 +1,6 @@
1menu "PCI host controller drivers" 1menu "PCI host controller drivers"
2 depends on PCI 2 depends on PCI
3 3
4config PCI_DRA7XX
5 bool "TI DRA7xx PCIe controller"
6 depends on OF && HAS_IOMEM && TI_PIPE3
7 depends on PCI_MSI_IRQ_DOMAIN
8 select PCIE_DW
9 help
10 Enables support for the PCIe controller in the DRA7xx SoC. There
11 are two instances of PCIe controller in DRA7xx. This controller can
12 act both as EP and RC. This reuses the Designware core.
13
14config PCI_MVEBU 4config PCI_MVEBU
15 bool "Marvell EBU PCIe controller" 5 bool "Marvell EBU PCIe controller"
16 depends on ARCH_MVEBU || ARCH_DOVE 6 depends on ARCH_MVEBU || ARCH_DOVE
@@ -37,36 +27,6 @@ config PCIE_XILINX_NWL
37 or End Point. The current option selection will only 27 or End Point. The current option selection will only
38 support root port enabling. 28 support root port enabling.
39 29
40config PCIE_DW_PLAT
41 bool "Platform bus based DesignWare PCIe Controller"
42 depends on PCI_MSI_IRQ_DOMAIN
43 select PCIE_DW
44 ---help---
45 This selects the DesignWare PCIe controller support. Select this if
46 you have a PCIe controller on Platform bus.
47
48 If you have a controller with this interface, say Y or M here.
49
50 If unsure, say N.
51
52config PCIE_DW
53 bool
54 depends on PCI_MSI_IRQ_DOMAIN
55
56config PCI_EXYNOS
57 bool "Samsung Exynos PCIe controller"
58 depends on SOC_EXYNOS5440
59 depends on PCI_MSI_IRQ_DOMAIN
60 select PCIEPORTBUS
61 select PCIE_DW
62
63config PCI_IMX6
64 bool "Freescale i.MX6 PCIe controller"
65 depends on SOC_IMX6Q
66 depends on PCI_MSI_IRQ_DOMAIN
67 select PCIEPORTBUS
68 select PCIE_DW
69
70config PCI_TEGRA 30config PCI_TEGRA
71 bool "NVIDIA Tegra PCIe controller" 31 bool "NVIDIA Tegra PCIe controller"
72 depends on ARCH_TEGRA 32 depends on ARCH_TEGRA
@@ -103,27 +63,6 @@ config PCI_HOST_GENERIC
103 Say Y here if you want to support a simple generic PCI host 63 Say Y here if you want to support a simple generic PCI host
104 controller, such as the one emulated by kvmtool. 64 controller, such as the one emulated by kvmtool.
105 65
106config PCIE_SPEAR13XX
107 bool "STMicroelectronics SPEAr PCIe controller"
108 depends on ARCH_SPEAR13XX
109 depends on PCI_MSI_IRQ_DOMAIN
110 select PCIEPORTBUS
111 select PCIE_DW
112 help
113 Say Y here if you want PCIe support on SPEAr13XX SoCs.
114
115config PCI_KEYSTONE
116 bool "TI Keystone PCIe controller"
117 depends on ARCH_KEYSTONE
118 depends on PCI_MSI_IRQ_DOMAIN
119 select PCIE_DW
120 select PCIEPORTBUS
121 help
122 Say Y here if you want to enable PCI controller support on Keystone
123 SoCs. The PCI controller on Keystone is based on Designware hardware
124 and therefore the driver re-uses the Designware core functions to
125 implement the driver.
126
127config PCIE_XILINX 66config PCIE_XILINX
128 bool "Xilinx AXI PCIe host bridge support" 67 bool "Xilinx AXI PCIe host bridge support"
129 depends on ARCH_ZYNQ || MICROBLAZE 68 depends on ARCH_ZYNQ || MICROBLAZE
@@ -150,15 +89,6 @@ config PCI_XGENE_MSI
150 Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. 89 Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
151 This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. 90 This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
152 91
153config PCI_LAYERSCAPE
154 bool "Freescale Layerscape PCIe controller"
155 depends on OF && (ARM || ARCH_LAYERSCAPE)
156 depends on PCI_MSI_IRQ_DOMAIN
157 select PCIE_DW
158 select MFD_SYSCON
159 help
160 Say Y here if you want PCIe controller support on Layerscape SoCs.
161
162config PCI_VERSATILE 92config PCI_VERSATILE
163 bool "ARM Versatile PB PCI controller" 93 bool "ARM Versatile PB PCI controller"
164 depends on ARCH_VERSATILE 94 depends on ARCH_VERSATILE
@@ -217,27 +147,6 @@ config PCIE_ALTERA_MSI
217 Say Y here if you want PCIe MSI support for the Altera FPGA. 147 Say Y here if you want PCIe MSI support for the Altera FPGA.
218 This MSI driver supports Altera MSI to GIC controller IP. 148 This MSI driver supports Altera MSI to GIC controller IP.
219 149
220config PCI_HISI
221 depends on OF && ARM64
222 bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
223 depends on PCI_MSI_IRQ_DOMAIN
224 select PCIEPORTBUS
225 select PCIE_DW
226 help
227 Say Y here if you want PCIe controller support on HiSilicon
228 Hip05 and Hip06 SoCs
229
230config PCIE_QCOM
231 bool "Qualcomm PCIe controller"
232 depends on ARCH_QCOM && OF
233 depends on PCI_MSI_IRQ_DOMAIN
234 select PCIE_DW
235 select PCIEPORTBUS
236 help
237 Say Y here to enable PCIe controller support on Qualcomm SoCs. The
238 PCIe controller uses the Designware core plus Qualcomm-specific
239 hardware wrappers.
240
241config PCI_HOST_THUNDER_PEM 150config PCI_HOST_THUNDER_PEM
242 bool "Cavium Thunder PCIe controller to off-chip devices" 151 bool "Cavium Thunder PCIe controller to off-chip devices"
243 depends on ARM64 152 depends on ARM64
@@ -254,28 +163,6 @@ config PCI_HOST_THUNDER_ECAM
254 help 163 help
255 Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. 164 Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
256 165
257config PCIE_ARMADA_8K
258 bool "Marvell Armada-8K PCIe controller"
259 depends on ARCH_MVEBU
260 depends on PCI_MSI_IRQ_DOMAIN
261 select PCIE_DW
262 select PCIEPORTBUS
263 help
264 Say Y here if you want to enable PCIe controller support on
265 Armada-8K SoCs. The PCIe controller on Armada-8K is based on
266 Designware hardware and therefore the driver re-uses the
267 Designware core functions to implement the driver.
268
269config PCIE_ARTPEC6
270 bool "Axis ARTPEC-6 PCIe controller"
271 depends on MACH_ARTPEC6
272 depends on PCI_MSI_IRQ_DOMAIN
273 select PCIE_DW
274 select PCIEPORTBUS
275 help
276 Say Y here to enable PCIe controller support on Axis ARTPEC-6
277 SoCs. This PCIe controller uses the DesignWare core.
278
279config PCIE_ROCKCHIP 166config PCIE_ROCKCHIP
280 bool "Rockchip PCIe controller" 167 bool "Rockchip PCIe controller"
281 depends on ARCH_ROCKCHIP || COMPILE_TEST 168 depends on ARCH_ROCKCHIP || COMPILE_TEST
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index bfe3179ae74c..4d3686676cc3 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,8 +1,3 @@
1obj-$(CONFIG_PCIE_DW) += pcie-designware.o
2obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
3obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
4obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
5obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
6obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o 1obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
7obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o 2obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
8obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o 3obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
@@ -11,12 +6,9 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
11obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o 6obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
12obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o 7obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
13obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o 8obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
14obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
15obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
16obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o 9obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
17obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o 10obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
18obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o 11obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
19obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
20obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o 12obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
21obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o 13obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
22obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o 14obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
@@ -24,9 +16,6 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
24obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o 16obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
25obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o 17obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
26obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o 18obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
27obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
28obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
29obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
30obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o 19obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
31obj-$(CONFIG_VMD) += vmd.o 20obj-$(CONFIG_VMD) += vmd.o
32 21
@@ -40,7 +29,6 @@ obj-$(CONFIG_VMD) += vmd.o
40# ARM64 and use internal ifdefs to only build the pieces we need 29# ARM64 and use internal ifdefs to only build the pieces we need
41# depending on whether ACPI, the DT driver, or both are enabled. 30# depending on whether ACPI, the DT driver, or both are enabled.
42 31
43obj-$(CONFIG_ARM64) += pcie-hisi.o
44obj-$(CONFIG_ARM64) += pci-thunder-ecam.o 32obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
45obj-$(CONFIG_ARM64) += pci-thunder-pem.o 33obj-$(CONFIG_ARM64) += pci-thunder-pem.o
46obj-$(CONFIG_ARM64) += pci-xgene.o 34obj-$(CONFIG_ARM64) += pci-xgene.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
deleted file mode 100644
index f1c544bb8b68..000000000000
--- a/drivers/pci/host/pci-exynos.c
+++ /dev/null
@@ -1,629 +0,0 @@
1/*
2 * PCIe host controller driver for Samsung EXYNOS SoCs
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/gpio.h>
17#include <linux/interrupt.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/of_gpio.h>
21#include <linux/pci.h>
22#include <linux/platform_device.h>
23#include <linux/resource.h>
24#include <linux/signal.h>
25#include <linux/types.h>
26
27#include "pcie-designware.h"
28
29#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
30
31struct exynos_pcie {
32 struct pcie_port pp;
33 void __iomem *elbi_base; /* DT 0th resource */
34 void __iomem *phy_base; /* DT 1st resource */
35 void __iomem *block_base; /* DT 2nd resource */
36 int reset_gpio;
37 struct clk *clk;
38 struct clk *bus_clk;
39};
40
41/* PCIe ELBI registers */
42#define PCIE_IRQ_PULSE 0x000
43#define IRQ_INTA_ASSERT (0x1 << 0)
44#define IRQ_INTB_ASSERT (0x1 << 2)
45#define IRQ_INTC_ASSERT (0x1 << 4)
46#define IRQ_INTD_ASSERT (0x1 << 6)
47#define PCIE_IRQ_LEVEL 0x004
48#define PCIE_IRQ_SPECIAL 0x008
49#define PCIE_IRQ_EN_PULSE 0x00c
50#define PCIE_IRQ_EN_LEVEL 0x010
51#define IRQ_MSI_ENABLE (0x1 << 2)
52#define PCIE_IRQ_EN_SPECIAL 0x014
53#define PCIE_PWR_RESET 0x018
54#define PCIE_CORE_RESET 0x01c
55#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
56#define PCIE_STICKY_RESET 0x020
57#define PCIE_NONSTICKY_RESET 0x024
58#define PCIE_APP_INIT_RESET 0x028
59#define PCIE_APP_LTSSM_ENABLE 0x02c
60#define PCIE_ELBI_RDLH_LINKUP 0x064
61#define PCIE_ELBI_LTSSM_ENABLE 0x1
62#define PCIE_ELBI_SLV_AWMISC 0x11c
63#define PCIE_ELBI_SLV_ARMISC 0x120
64#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
65
66/* PCIe Purple registers */
67#define PCIE_PHY_GLOBAL_RESET 0x000
68#define PCIE_PHY_COMMON_RESET 0x004
69#define PCIE_PHY_CMN_REG 0x008
70#define PCIE_PHY_MAC_RESET 0x00c
71#define PCIE_PHY_PLL_LOCKED 0x010
72#define PCIE_PHY_TRSVREG_RESET 0x020
73#define PCIE_PHY_TRSV_RESET 0x024
74
75/* PCIe PHY registers */
76#define PCIE_PHY_IMPEDANCE 0x004
77#define PCIE_PHY_PLL_DIV_0 0x008
78#define PCIE_PHY_PLL_BIAS 0x00c
79#define PCIE_PHY_DCC_FEEDBACK 0x014
80#define PCIE_PHY_PLL_DIV_1 0x05c
81#define PCIE_PHY_COMMON_POWER 0x064
82#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
83#define PCIE_PHY_TRSV0_EMP_LVL 0x084
84#define PCIE_PHY_TRSV0_DRV_LVL 0x088
85#define PCIE_PHY_TRSV0_RXCDR 0x0ac
86#define PCIE_PHY_TRSV0_POWER 0x0c4
87#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
88#define PCIE_PHY_TRSV0_LVCC 0x0dc
89#define PCIE_PHY_TRSV1_EMP_LVL 0x144
90#define PCIE_PHY_TRSV1_RXCDR 0x16c
91#define PCIE_PHY_TRSV1_POWER 0x184
92#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
93#define PCIE_PHY_TRSV1_LVCC 0x19c
94#define PCIE_PHY_TRSV2_EMP_LVL 0x204
95#define PCIE_PHY_TRSV2_RXCDR 0x22c
96#define PCIE_PHY_TRSV2_POWER 0x244
97#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
98#define PCIE_PHY_TRSV2_LVCC 0x25c
99#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
100#define PCIE_PHY_TRSV3_RXCDR 0x2ec
101#define PCIE_PHY_TRSV3_POWER 0x304
102#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
103#define PCIE_PHY_TRSV3_LVCC 0x31c
104
105static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
106{
107 writel(val, exynos_pcie->elbi_base + reg);
108}
109
110static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg)
111{
112 return readl(exynos_pcie->elbi_base + reg);
113}
114
115static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
116{
117 writel(val, exynos_pcie->phy_base + reg);
118}
119
120static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg)
121{
122 return readl(exynos_pcie->phy_base + reg);
123}
124
125static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
126{
127 writel(val, exynos_pcie->block_base + reg);
128}
129
130static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg)
131{
132 return readl(exynos_pcie->block_base + reg);
133}
134
135static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie,
136 bool on)
137{
138 u32 val;
139
140 if (on) {
141 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
142 val |= PCIE_ELBI_SLV_DBI_ENABLE;
143 exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
144 } else {
145 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
146 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
147 exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
148 }
149}
150
151static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie,
152 bool on)
153{
154 u32 val;
155
156 if (on) {
157 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
158 val |= PCIE_ELBI_SLV_DBI_ENABLE;
159 exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
160 } else {
161 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
162 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
163 exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
164 }
165}
166
/*
 * Put the PCIe core into reset.  The write order below is part of the
 * hardware bring-up sequence; do not reorder.
 */
static void exynos_pcie_assert_core_reset(struct exynos_pcie *exynos_pcie)
{
	u32 val;

	/* Clear the core-reset release bit, then drop the remaining resets */
	val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
	val &= ~PCIE_CORE_RESET_ENABLE;
	exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
	exynos_elb_writel(exynos_pcie, 0, PCIE_PWR_RESET);
	exynos_elb_writel(exynos_pcie, 0, PCIE_STICKY_RESET);
	exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
}
178
/*
 * Bring the PCIe core out of reset.  Mirrors exynos_pcie_assert_core_reset();
 * the write order below is part of the hardware bring-up sequence.
 */
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie)
{
	u32 val;

	val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
	val |= PCIE_CORE_RESET_ENABLE;

	exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
	exynos_elb_writel(exynos_pcie, 1, PCIE_STICKY_RESET);
	exynos_elb_writel(exynos_pcie, 1, PCIE_NONSTICKY_RESET);
	/* Pulse the application-layer init reset: assert then release */
	exynos_elb_writel(exynos_pcie, 1, PCIE_APP_INIT_RESET);
	exynos_elb_writel(exynos_pcie, 0, PCIE_APP_INIT_RESET);
	exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
}
193
/*
 * Put the PHY into reset before (re)configuring it.
 * NOTE(review): polarity assumed from the deassert path — 0 drops the
 * MAC-reset release, 1 asserts the global PHY reset; confirm against TRM.
 */
static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie)
{
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
	exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
}
199
/*
 * Release the PHY resets in the order required by the hardware:
 * global reset first, then power reset, then the common/trsv sub-resets.
 */
static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie)
{
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
	exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_CMN_REG);
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSVREG_RESET);
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
209
210static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie)
211{
212 u32 val;
213
214 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
215 val &= ~PCIE_PHY_COMMON_PD_CMN;
216 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
217
218 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
219 val &= ~PCIE_PHY_TRSV0_PD_TSV;
220 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
221
222 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
223 val &= ~PCIE_PHY_TRSV1_PD_TSV;
224 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
225
226 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
227 val &= ~PCIE_PHY_TRSV2_PD_TSV;
228 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
229
230 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
231 val &= ~PCIE_PHY_TRSV3_PD_TSV;
232 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
233}
234
235static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie)
236{
237 u32 val;
238
239 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
240 val |= PCIE_PHY_COMMON_PD_CMN;
241 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
242
243 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
244 val |= PCIE_PHY_TRSV0_PD_TSV;
245 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
246
247 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
248 val |= PCIE_PHY_TRSV1_PD_TSV;
249 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
250
251 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
252 val |= PCIE_PHY_TRSV2_PD_TSV;
253 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
254
255 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
256 val |= PCIE_PHY_TRSV3_PD_TSV;
257 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
258}
259
/*
 * Program the analog PHY settings (clocking, impedance, drive levels).
 * The magic values are vendor-provided calibration constants for the
 * Exynos 5440 PHY; the write order follows the vendor init sequence.
 */
static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie)
{
	/* DCC feedback control off */
	exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);

	/* set TX/RX impedance */
	exynos_phy_writel(exynos_pcie, 0xd5, PCIE_PHY_IMPEDANCE);

	/* set 50Mhz PHY clock */
	exynos_phy_writel(exynos_pcie, 0x14, PCIE_PHY_PLL_DIV_0);
	exynos_phy_writel(exynos_pcie, 0x12, PCIE_PHY_PLL_DIV_1);

	/* set TX Differential output for lane 0 */
	exynos_phy_writel(exynos_pcie, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);

	/* set TX Pre-emphasis Level Control for lane 0 to minimum */
	exynos_phy_writel(exynos_pcie, 0x0, PCIE_PHY_TRSV0_EMP_LVL);

	/* set RX clock and data recovery bandwidth */
	exynos_phy_writel(exynos_pcie, 0xe7, PCIE_PHY_PLL_BIAS);
	exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV0_RXCDR);
	exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV1_RXCDR);
	exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV2_RXCDR);
	exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV3_RXCDR);

	/* change TX Pre-emphasis Level Control for lanes */
	exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
	exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
	exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
	exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV3_EMP_LVL);

	/* set LVCC */
	exynos_phy_writel(exynos_pcie, 0x20, PCIE_PHY_TRSV0_LVCC);
	exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV1_LVCC);
	exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV2_LVCC);
	exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
297
298static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie)
299{
300 struct pcie_port *pp = &exynos_pcie->pp;
301 struct device *dev = pp->dev;
302
303 if (exynos_pcie->reset_gpio >= 0)
304 devm_gpio_request_one(dev, exynos_pcie->reset_gpio,
305 GPIOF_OUT_INIT_HIGH, "RESET");
306}
307
/*
 * Full link bring-up sequence: core/PHY reset dance, PHY calibration,
 * RC setup, then LTSSM enable.  Returns 0 on link-up (or if the link was
 * already up), -ETIMEDOUT otherwise.  The statement order is part of the
 * hardware sequence; do not reorder.
 */
static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie)
{
	struct pcie_port *pp = &exynos_pcie->pp;
	struct device *dev = pp->dev;
	u32 val;

	if (dw_pcie_link_up(pp)) {
		dev_err(dev, "Link already up\n");
		return 0;
	}

	exynos_pcie_assert_core_reset(exynos_pcie);
	exynos_pcie_assert_phy_reset(exynos_pcie);
	exynos_pcie_deassert_phy_reset(exynos_pcie);
	exynos_pcie_power_on_phy(exynos_pcie);
	exynos_pcie_init_phy(exynos_pcie);

	/* pulse for common reset */
	exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
	udelay(500);
	exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);

	exynos_pcie_deassert_core_reset(exynos_pcie);
	dw_pcie_setup_rc(pp);
	exynos_pcie_assert_reset(exynos_pcie);

	/* assert LTSSM enable */
	exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
			  PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pp))
		return 0;

	/*
	 * Link training failed: report the PLL lock state for diagnosis.
	 * NOTE(review): this loop spins forever if the PLL never locks —
	 * there is no timeout.  Also, the loop condition polls the PHY
	 * space while the logged value comes from the block space;
	 * presumably the lock status is mirrored — confirm against TRM.
	 */
	while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
		val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
		dev_info(dev, "PLL Locked: 0x%x\n", val);
	}
	/* power down the PHY again before giving up */
	exynos_pcie_power_off_phy(exynos_pcie);
	return -ETIMEDOUT;
}
349
/*
 * Acknowledge all pending pulse interrupts.  The pending bits are
 * write-back-to-clear: reading the register and writing the same value
 * back clears exactly what was pending.
 */
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie)
{
	u32 val;

	val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
	exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
}
357
358static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie)
359{
360 u32 val;
361
362 /* enable INTX interrupt */
363 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
364 IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
365 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE);
366}
367
/*
 * Top-level IRQ handler for the pulse interrupt line: acknowledge the
 * pending bits and return.  No per-interrupt dispatch is done here.
 */
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
	struct exynos_pcie *exynos_pcie = arg;

	exynos_pcie_clear_irq_pulse(exynos_pcie);
	return IRQ_HANDLED;
}
375
376static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
377{
378 struct exynos_pcie *exynos_pcie = arg;
379 struct pcie_port *pp = &exynos_pcie->pp;
380
381 return dw_handle_msi_irq(pp);
382}
383
384static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie)
385{
386 struct pcie_port *pp = &exynos_pcie->pp;
387 u32 val;
388
389 dw_pcie_msi_init(pp);
390
391 /* enable MSI interrupt */
392 val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
393 val |= IRQ_MSI_ENABLE;
394 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
395}
396
/* Enable legacy (INTx) interrupts, plus MSI when the kernel supports it. */
static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie)
{
	exynos_pcie_enable_irq_pulse(exynos_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		exynos_pcie_msi_init(exynos_pcie);
}
404
/*
 * Read a root-complex DBI register.  The access must be bracketed by the
 * sideband read-mode bit, which routes the AXI read to the DBI.
 */
static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
	u32 val;

	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
	val = readl(pp->dbi_base + reg);
	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
	return val;
}
415
/*
 * Write a root-complex DBI register.  The access must be bracketed by the
 * sideband write-mode bit, which routes the AXI write to the DBI.
 */
static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);

	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
	writel(val, pp->dbi_base + reg);
	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
}
424
/*
 * Read the RC's own config space (size-aware), bracketed by the sideband
 * read mode.  Returns a PCIBIOS_* status from dw_pcie_cfg_read().
 */
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				   u32 *val)
{
	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
	int ret;

	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
	ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
	return ret;
}
436
/*
 * Write the RC's own config space (size-aware), bracketed by the sideband
 * write mode.  Returns a PCIBIOS_* status from dw_pcie_cfg_write().
 */
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
				   u32 val)
{
	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
	int ret;

	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
	ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
	return ret;
}
448
449static int exynos_pcie_link_up(struct pcie_port *pp)
450{
451 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
452 u32 val;
453
454 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
455 if (val == PCIE_ELBI_LTSSM_ENABLE)
456 return 1;
457
458 return 0;
459}
460
/*
 * host_init hook for the DesignWare core: bring the link up, then enable
 * interrupts.  Note the establish-link return value is not propagated
 * (the hook returns void in this API version).
 */
static void exynos_pcie_host_init(struct pcie_port *pp)
{
	struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);

	exynos_pcie_establish_link(exynos_pcie);
	exynos_pcie_enable_interrupts(exynos_pcie);
}
468
/*
 * Hooks into the DesignWare core.  The readl_rc/writel_rc and *_own_conf
 * overrides exist because Exynos routes DBI accesses through the ELBI
 * sideband mode bits (see exynos_pcie_sideband_dbi_*_mode()).
 */
static struct pcie_host_ops exynos_pcie_host_ops = {
	.readl_rc = exynos_pcie_readl_rc,
	.writel_rc = exynos_pcie_writel_rc,
	.rd_own_conf = exynos_pcie_rd_own_conf,
	.wr_own_conf = exynos_pcie_wr_own_conf,
	.link_up = exynos_pcie_link_up,
	.host_init = exynos_pcie_host_init,
};
477
478static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie,
479 struct platform_device *pdev)
480{
481 struct pcie_port *pp = &exynos_pcie->pp;
482 struct device *dev = pp->dev;
483 int ret;
484
485 pp->irq = platform_get_irq(pdev, 1);
486 if (!pp->irq) {
487 dev_err(dev, "failed to get irq\n");
488 return -ENODEV;
489 }
490 ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
491 IRQF_SHARED, "exynos-pcie", exynos_pcie);
492 if (ret) {
493 dev_err(dev, "failed to request irq\n");
494 return ret;
495 }
496
497 if (IS_ENABLED(CONFIG_PCI_MSI)) {
498 pp->msi_irq = platform_get_irq(pdev, 0);
499 if (!pp->msi_irq) {
500 dev_err(dev, "failed to get msi irq\n");
501 return -ENODEV;
502 }
503
504 ret = devm_request_irq(dev, pp->msi_irq,
505 exynos_pcie_msi_irq_handler,
506 IRQF_SHARED | IRQF_NO_THREAD,
507 "exynos-pcie", exynos_pcie);
508 if (ret) {
509 dev_err(dev, "failed to request msi irq\n");
510 return ret;
511 }
512 }
513
514 pp->root_bus_nr = -1;
515 pp->ops = &exynos_pcie_host_ops;
516
517 ret = dw_pcie_host_init(pp);
518 if (ret) {
519 dev_err(dev, "failed to initialize host\n");
520 return ret;
521 }
522
523 return 0;
524}
525
/*
 * Probe: allocate driver state, look up the optional reset GPIO, enable
 * the core and bus clocks, map the three register spaces (ELBI, PHY,
 * block control), then register the port.  Cleanup on failure unwinds the
 * clocks via the goto labels; the devm_* resources are released by the
 * driver core.
 */
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct exynos_pcie *exynos_pcie;
	struct pcie_port *pp;
	struct device_node *np = dev->of_node;
	struct resource *elbi_base;
	struct resource *phy_base;
	struct resource *block_base;
	int ret;

	exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL);
	if (!exynos_pcie)
		return -ENOMEM;

	pp = &exynos_pcie->pp;
	pp->dev = dev;

	/* Optional; a negative value means "no reset GPIO in the DT" */
	exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);

	exynos_pcie->clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(exynos_pcie->clk)) {
		dev_err(dev, "Failed to get pcie rc clock\n");
		return PTR_ERR(exynos_pcie->clk);
	}
	ret = clk_prepare_enable(exynos_pcie->clk);
	if (ret)
		return ret;

	exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(exynos_pcie->bus_clk)) {
		dev_err(dev, "Failed to get pcie bus clock\n");
		ret = PTR_ERR(exynos_pcie->bus_clk);
		goto fail_clk;
	}
	ret = clk_prepare_enable(exynos_pcie->bus_clk);
	if (ret)
		goto fail_clk;

	/* Register spaces, in DT "reg" order: ELBI, PHY, block control */
	elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base);
	if (IS_ERR(exynos_pcie->elbi_base)) {
		ret = PTR_ERR(exynos_pcie->elbi_base);
		goto fail_bus_clk;
	}

	phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
	if (IS_ERR(exynos_pcie->phy_base)) {
		ret = PTR_ERR(exynos_pcie->phy_base);
		goto fail_bus_clk;
	}

	block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	exynos_pcie->block_base = devm_ioremap_resource(dev, block_base);
	if (IS_ERR(exynos_pcie->block_base)) {
		ret = PTR_ERR(exynos_pcie->block_base);
		goto fail_bus_clk;
	}

	ret = exynos_add_pcie_port(exynos_pcie, pdev);
	if (ret < 0)
		goto fail_bus_clk;

	platform_set_drvdata(pdev, exynos_pcie);
	return 0;

fail_bus_clk:
	clk_disable_unprepare(exynos_pcie->bus_clk);
fail_clk:
	clk_disable_unprepare(exynos_pcie->clk);
	return ret;
}
599
/* Remove: disable the clocks enabled at probe time, in reverse order. */
static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
	struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);

	clk_disable_unprepare(exynos_pcie->bus_clk);
	clk_disable_unprepare(exynos_pcie->clk);

	return 0;
}
609
/* Device-tree match table; bound by compatible string only. */
static const struct of_device_id exynos_pcie_of_match[] = {
	{ .compatible = "samsung,exynos5440-pcie", },
	{},
};
614
/*
 * No .probe here: the driver is registered with platform_driver_probe()
 * (see exynos_pcie_init() below), which passes the __init probe function
 * directly and forbids deferred probing.
 */
static struct platform_driver exynos_pcie_driver = {
	.remove		= __exit_p(exynos_pcie_remove),
	.driver = {
		.name	= "exynos-pcie",
		.of_match_table = exynos_pcie_of_match,
	},
};
622
/* Exynos PCIe driver does not allow module unload */

/*
 * Register at subsys_initcall time so the RC is up before device drivers
 * probe.  platform_driver_probe() lets exynos_pcie_probe() stay __init.
 */
static int __init exynos_pcie_init(void)
{
	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
}
subsys_initcall(exynos_pcie_init);
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index e3c48b5deb93..e9a53bae1c25 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -145,7 +145,9 @@ int pci_host_common_probe(struct platform_device *pdev,
145 return -ENODEV; 145 return -ENODEV;
146 } 146 }
147 147
148#ifdef CONFIG_ARM
148 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); 149 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
150#endif
149 151
150 /* 152 /*
151 * We insert PCI resources into the iomem_resource and 153 * We insert PCI resources into the iomem_resource and
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 3efcc7bdc5fb..ada98569b78e 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -130,7 +130,8 @@ union pci_version {
130 */ 130 */
131union win_slot_encoding { 131union win_slot_encoding {
132 struct { 132 struct {
133 u32 func:8; 133 u32 dev:5;
134 u32 func:3;
134 u32 reserved:24; 135 u32 reserved:24;
135 } bits; 136 } bits;
136 u32 slot; 137 u32 slot;
@@ -485,7 +486,8 @@ static u32 devfn_to_wslot(int devfn)
485 union win_slot_encoding wslot; 486 union win_slot_encoding wslot;
486 487
487 wslot.slot = 0; 488 wslot.slot = 0;
488 wslot.bits.func = PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5); 489 wslot.bits.dev = PCI_SLOT(devfn);
490 wslot.bits.func = PCI_FUNC(devfn);
489 491
490 return wslot.slot; 492 return wslot.slot;
491} 493}
@@ -503,7 +505,7 @@ static int wslot_to_devfn(u32 wslot)
503 union win_slot_encoding slot_no; 505 union win_slot_encoding slot_no;
504 506
505 slot_no.slot = wslot; 507 slot_no.slot = wslot;
506 return PCI_DEVFN(0, slot_no.bits.func); 508 return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
507} 509}
508 510
509/* 511/*
@@ -1315,6 +1317,18 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
1315 get_pcichild(hpdev, hv_pcidev_ref_initial); 1317 get_pcichild(hpdev, hv_pcidev_ref_initial);
1316 get_pcichild(hpdev, hv_pcidev_ref_childlist); 1318 get_pcichild(hpdev, hv_pcidev_ref_childlist);
1317 spin_lock_irqsave(&hbus->device_list_lock, flags); 1319 spin_lock_irqsave(&hbus->device_list_lock, flags);
1320
1321 /*
1322 * When a device is being added to the bus, we set the PCI domain
1323 * number to be the device serial number, which is non-zero and
1324 * unique on the same VM. The serial numbers start with 1, and
1325 * increase by 1 for each device. So device names including this
1326 * can have shorter names than based on the bus instance UUID.
1327 * Only the first device serial number is used for domain, so the
1328 * domain number will not change after the first device is added.
1329 */
1330 if (list_empty(&hbus->children))
1331 hbus->sysdata.domain = desc->ser;
1318 list_add_tail(&hpdev->list_entry, &hbus->children); 1332 list_add_tail(&hpdev->list_entry, &hbus->children);
1319 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 1333 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1320 return hpdev; 1334 return hpdev;
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 0c1540225ca3..5043b5f00ed8 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -65,7 +65,7 @@
65 (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) 65 (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
66#define TLP_CFG_DW2(bus, devfn, offset) \ 66#define TLP_CFG_DW2(bus, devfn, offset) \
67 (((bus) << 24) | ((devfn) << 16) | (offset)) 67 (((bus) << 24) | ((devfn) << 16) | (offset))
68#define TLP_COMP_STATUS(s) (((s) >> 12) & 7) 68#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
69#define TLP_HDR_SIZE 3 69#define TLP_HDR_SIZE 3
70#define TLP_LOOP 500 70#define TLP_LOOP 500
71 71
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
deleted file mode 100644
index a567ea288ee2..000000000000
--- a/drivers/pci/host/pcie-designware.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * Synopsys Designware PCIe host controller driver
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef _PCIE_DESIGNWARE_H
15#define _PCIE_DESIGNWARE_H
16
17/*
18 * Maximum number of MSI IRQs can be 256 per controller. But keep
19 * it 32 as of now. Probably we will never need more than 32. If needed,
20 * then increment it in multiple of 32.
21 */
22#define MAX_MSI_IRQS 32
23#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
24
25struct pcie_port {
26 struct device *dev;
27 u8 root_bus_nr;
28 void __iomem *dbi_base;
29 u64 cfg0_base;
30 void __iomem *va_cfg0_base;
31 u32 cfg0_size;
32 u64 cfg1_base;
33 void __iomem *va_cfg1_base;
34 u32 cfg1_size;
35 resource_size_t io_base;
36 phys_addr_t io_bus_addr;
37 u32 io_size;
38 u64 mem_base;
39 phys_addr_t mem_bus_addr;
40 u32 mem_size;
41 struct resource *cfg;
42 struct resource *io;
43 struct resource *mem;
44 struct resource *busn;
45 int irq;
46 u32 lanes;
47 u32 num_viewport;
48 struct pcie_host_ops *ops;
49 int msi_irq;
50 struct irq_domain *irq_domain;
51 unsigned long msi_data;
52 u8 iatu_unroll_enabled;
53 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
54};
55
56struct pcie_host_ops {
57 u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
58 void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
59 int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
60 int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
61 int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
62 unsigned int devfn, int where, int size, u32 *val);
63 int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
64 unsigned int devfn, int where, int size, u32 val);
65 int (*link_up)(struct pcie_port *pp);
66 void (*host_init)(struct pcie_port *pp);
67 void (*msi_set_irq)(struct pcie_port *pp, int irq);
68 void (*msi_clear_irq)(struct pcie_port *pp, int irq);
69 phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
70 u32 (*get_msi_data)(struct pcie_port *pp, int pos);
71 void (*scan_bus)(struct pcie_port *pp);
72 int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
73};
74
75u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
76void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
77int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
78int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
79irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
80void dw_pcie_msi_init(struct pcie_port *pp);
81int dw_pcie_wait_for_link(struct pcie_port *pp);
82int dw_pcie_link_up(struct pcie_port *pp);
83void dw_pcie_setup_rc(struct pcie_port *pp);
84int dw_pcie_host_init(struct pcie_port *pp);
85
86#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 22d814a78a78..f4909bb0b2ad 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
47static int iproc_pcie_pltfm_probe(struct platform_device *pdev) 47static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
48{ 48{
49 struct device *dev = &pdev->dev; 49 struct device *dev = &pdev->dev;
50 const struct of_device_id *of_id;
51 struct iproc_pcie *pcie; 50 struct iproc_pcie *pcie;
52 struct device_node *np = dev->of_node; 51 struct device_node *np = dev->of_node;
53 struct resource reg; 52 struct resource reg;
@@ -55,16 +54,12 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
55 LIST_HEAD(res); 54 LIST_HEAD(res);
56 int ret; 55 int ret;
57 56
58 of_id = of_match_device(iproc_pcie_of_match_table, dev);
59 if (!of_id)
60 return -EINVAL;
61
62 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
63 if (!pcie) 58 if (!pcie)
64 return -ENOMEM; 59 return -ENOMEM;
65 60
66 pcie->dev = dev; 61 pcie->dev = dev;
67 pcie->type = (enum iproc_pcie_type)of_id->data; 62 pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
68 63
69 ret = of_address_to_resource(np, 0, &reg); 64 ret = of_address_to_resource(np, 0, &reg);
70 if (ret < 0) { 65 if (ret < 0) {
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 3ebc025499b9..0f39bd2a04cb 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -1205,7 +1205,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
1205 struct device *dev; 1205 struct device *dev;
1206 int ret; 1206 int ret;
1207 void *sysdata; 1207 void *sysdata;
1208 struct pci_bus *bus; 1208 struct pci_bus *bus, *child;
1209 1209
1210 dev = pcie->dev; 1210 dev = pcie->dev;
1211 1211
@@ -1278,6 +1278,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
1278 if (pcie->map_irq) 1278 if (pcie->map_irq)
1279 pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); 1279 pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
1280 1280
1281 list_for_each_entry(child, &bus->children, node)
1282 pcie_bus_configure_settings(child);
1283
1281 pci_bus_add_devices(bus); 1284 pci_bus_add_devices(bus);
1282 1285
1283 return 0; 1286 return 0;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 68d105aaf4e2..984c7e8cec5a 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -107,7 +107,7 @@ static void __exit ibm_acpiphp_exit(void);
107 107
108static acpi_handle ibm_acpi_handle; 108static acpi_handle ibm_acpi_handle;
109static struct notification ibm_note; 109static struct notification ibm_note;
110static struct bin_attribute ibm_apci_table_attr = { 110static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
111 .attr = { 111 .attr = {
112 .name = "apci_table", 112 .name = "apci_table",
113 .mode = S_IRUGO, 113 .mode = S_IRUGO,
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index c614ff7c3bc3..3f93a4e79595 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -463,7 +463,6 @@ static inline int is_dlpar_capable(void)
463 463
464int __init rpadlpar_io_init(void) 464int __init rpadlpar_io_init(void)
465{ 465{
466 int rc = 0;
467 466
468 if (!is_dlpar_capable()) { 467 if (!is_dlpar_capable()) {
469 printk(KERN_WARNING "%s: partition not DLPAR capable\n", 468 printk(KERN_WARNING "%s: partition not DLPAR capable\n",
@@ -471,8 +470,7 @@ int __init rpadlpar_io_init(void)
471 return -EPERM; 470 return -EPERM;
472 } 471 }
473 472
474 rc = dlpar_sysfs_init(); 473 return dlpar_sysfs_init();
475 return rc;
476} 474}
477 475
478void rpadlpar_io_exit(void) 476void rpadlpar_io_exit(void)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 47227820406d..2479ae876482 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -124,7 +124,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
124 struct pci_sriov *iov = dev->sriov; 124 struct pci_sriov *iov = dev->sriov;
125 struct pci_bus *bus; 125 struct pci_bus *bus;
126 126
127 mutex_lock(&iov->dev->sriov->lock);
128 bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id)); 127 bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
129 if (!bus) 128 if (!bus)
130 goto failed; 129 goto failed;
@@ -162,7 +161,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
162 __pci_reset_function(virtfn); 161 __pci_reset_function(virtfn);
163 162
164 pci_device_add(virtfn, virtfn->bus); 163 pci_device_add(virtfn, virtfn->bus);
165 mutex_unlock(&iov->dev->sriov->lock);
166 164
167 pci_bus_add_device(virtfn); 165 pci_bus_add_device(virtfn);
168 sprintf(buf, "virtfn%u", id); 166 sprintf(buf, "virtfn%u", id);
@@ -181,12 +179,10 @@ failed2:
181 sysfs_remove_link(&dev->dev.kobj, buf); 179 sysfs_remove_link(&dev->dev.kobj, buf);
182failed1: 180failed1:
183 pci_dev_put(dev); 181 pci_dev_put(dev);
184 mutex_lock(&iov->dev->sriov->lock);
185 pci_stop_and_remove_bus_device(virtfn); 182 pci_stop_and_remove_bus_device(virtfn);
186failed0: 183failed0:
187 virtfn_remove_bus(dev->bus, bus); 184 virtfn_remove_bus(dev->bus, bus);
188failed: 185failed:
189 mutex_unlock(&iov->dev->sriov->lock);
190 186
191 return rc; 187 return rc;
192} 188}
@@ -195,7 +191,6 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
195{ 191{
196 char buf[VIRTFN_ID_LEN]; 192 char buf[VIRTFN_ID_LEN];
197 struct pci_dev *virtfn; 193 struct pci_dev *virtfn;
198 struct pci_sriov *iov = dev->sriov;
199 194
200 virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 195 virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
201 pci_iov_virtfn_bus(dev, id), 196 pci_iov_virtfn_bus(dev, id),
@@ -218,10 +213,8 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
218 if (virtfn->dev.kobj.sd) 213 if (virtfn->dev.kobj.sd)
219 sysfs_remove_link(&virtfn->dev.kobj, "physfn"); 214 sysfs_remove_link(&virtfn->dev.kobj, "physfn");
220 215
221 mutex_lock(&iov->dev->sriov->lock);
222 pci_stop_and_remove_bus_device(virtfn); 216 pci_stop_and_remove_bus_device(virtfn);
223 virtfn_remove_bus(dev->bus, virtfn->bus); 217 virtfn_remove_bus(dev->bus, virtfn->bus);
224 mutex_unlock(&iov->dev->sriov->lock);
225 218
226 /* balance pci_get_domain_bus_and_slot() */ 219 /* balance pci_get_domain_bus_and_slot() */
227 pci_dev_put(virtfn); 220 pci_dev_put(virtfn);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 50c5003295ca..93cc268c6ff1 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -32,32 +32,13 @@ int pci_msi_ignore_mask;
32#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) 32#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
33 33
34#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 34#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
35static struct irq_domain *pci_msi_default_domain;
36static DEFINE_MUTEX(pci_msi_domain_lock);
37
38struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
39{
40 return pci_msi_default_domain;
41}
42
43static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
44{
45 struct irq_domain *domain;
46
47 domain = dev_get_msi_domain(&dev->dev);
48 if (domain)
49 return domain;
50
51 return arch_get_pci_msi_domain(dev);
52}
53
54static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 35static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
55{ 36{
56 struct irq_domain *domain; 37 struct irq_domain *domain;
57 38
58 domain = pci_msi_get_domain(dev); 39 domain = dev_get_msi_domain(&dev->dev);
59 if (domain && irq_domain_is_hierarchy(domain)) 40 if (domain && irq_domain_is_hierarchy(domain))
60 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); 41 return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
61 42
62 return arch_setup_msi_irqs(dev, nvec, type); 43 return arch_setup_msi_irqs(dev, nvec, type);
63} 44}
@@ -66,9 +47,9 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
66{ 47{
67 struct irq_domain *domain; 48 struct irq_domain *domain;
68 49
69 domain = pci_msi_get_domain(dev); 50 domain = dev_get_msi_domain(&dev->dev);
70 if (domain && irq_domain_is_hierarchy(domain)) 51 if (domain && irq_domain_is_hierarchy(domain))
71 pci_msi_domain_free_irqs(domain, dev); 52 msi_domain_free_irqs(domain, &dev->dev);
72 else 53 else
73 arch_teardown_msi_irqs(dev); 54 arch_teardown_msi_irqs(dev);
74} 55}
@@ -610,7 +591,7 @@ static int msi_verify_entries(struct pci_dev *dev)
610 * msi_capability_init - configure device's MSI capability structure 591 * msi_capability_init - configure device's MSI capability structure
611 * @dev: pointer to the pci_dev data structure of MSI device function 592 * @dev: pointer to the pci_dev data structure of MSI device function
612 * @nvec: number of interrupts to allocate 593 * @nvec: number of interrupts to allocate
613 * @affinity: flag to indicate cpu irq affinity mask should be set 594 * @affd: description of automatic irq affinity assignments (may be %NULL)
614 * 595 *
615 * Setup the MSI capability structure of the device with the requested 596 * Setup the MSI capability structure of the device with the requested
616 * number of interrupts. A return value of zero indicates the successful 597 * number of interrupts. A return value of zero indicates the successful
@@ -731,7 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
731 ret = 0; 712 ret = 0;
732out: 713out:
733 kfree(masks); 714 kfree(masks);
734 return 0; 715 return ret;
735} 716}
736 717
737static void msix_program_entries(struct pci_dev *dev, 718static void msix_program_entries(struct pci_dev *dev,
@@ -1084,7 +1065,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1084 if (nvec < 0) 1065 if (nvec < 0)
1085 return nvec; 1066 return nvec;
1086 if (nvec < minvec) 1067 if (nvec < minvec)
1087 return -EINVAL; 1068 return -ENOSPC;
1088 1069
1089 if (nvec > maxvec) 1070 if (nvec > maxvec)
1090 nvec = maxvec; 1071 nvec = maxvec;
@@ -1109,23 +1090,15 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1109 } 1090 }
1110} 1091}
1111 1092
1112/** 1093/* deprecated, don't use */
1113 * pci_enable_msi_range - configure device's MSI capability structure 1094int pci_enable_msi(struct pci_dev *dev)
1114 * @dev: device to configure
1115 * @minvec: minimal number of interrupts to configure
1116 * @maxvec: maximum number of interrupts to configure
1117 *
1118 * This function tries to allocate a maximum possible number of interrupts in a
1119 * range between @minvec and @maxvec. It returns a negative errno if an error
1120 * occurs. If it succeeds, it returns the actual number of interrupts allocated
1121 * and updates the @dev's irq member to the lowest new interrupt number;
1122 * the other interrupt numbers allocated to this device are consecutive.
1123 **/
1124int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
1125{ 1095{
1126 return __pci_enable_msi_range(dev, minvec, maxvec, NULL); 1096 int rc = __pci_enable_msi_range(dev, 1, 1, NULL);
1097 if (rc < 0)
1098 return rc;
1099 return 0;
1127} 1100}
1128EXPORT_SYMBOL(pci_enable_msi_range); 1101EXPORT_SYMBOL(pci_enable_msi);
1129 1102
1130static int __pci_enable_msix_range(struct pci_dev *dev, 1103static int __pci_enable_msix_range(struct pci_dev *dev,
1131 struct msix_entry *entries, int minvec, 1104 struct msix_entry *entries, int minvec,
@@ -1225,9 +1198,11 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1225 } 1198 }
1226 1199
1227 /* use legacy irq if allowed */ 1200 /* use legacy irq if allowed */
1228 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) { 1201 if (flags & PCI_IRQ_LEGACY) {
1229 pci_intx(dev, 1); 1202 if (min_vecs == 1 && dev->irq) {
1230 return 1; 1203 pci_intx(dev, 1);
1204 return 1;
1205 }
1231 } 1206 }
1232 1207
1233 return vecs; 1208 return vecs;
@@ -1381,7 +1356,7 @@ int pci_msi_domain_check_cap(struct irq_domain *domain,
1381{ 1356{
1382 struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev)); 1357 struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
1383 1358
1384 /* Special handling to support pci_enable_msi_range() */ 1359 /* Special handling to support __pci_enable_msi_range() */
1385 if (pci_msi_desc_is_multi_msi(desc) && 1360 if (pci_msi_desc_is_multi_msi(desc) &&
1386 !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) 1361 !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
1387 return 1; 1362 return 1;
@@ -1394,7 +1369,7 @@ int pci_msi_domain_check_cap(struct irq_domain *domain,
1394static int pci_msi_domain_handle_error(struct irq_domain *domain, 1369static int pci_msi_domain_handle_error(struct irq_domain *domain,
1395 struct msi_desc *desc, int error) 1370 struct msi_desc *desc, int error)
1396{ 1371{
1397 /* Special handling to support pci_enable_msi_range() */ 1372 /* Special handling to support __pci_enable_msi_range() */
1398 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) 1373 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
1399 return 1; 1374 return 1;
1400 1375
@@ -1481,59 +1456,6 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
1481} 1456}
1482EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); 1457EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
1483 1458
1484/**
1485 * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
1486 * @domain: The interrupt domain to allocate from
1487 * @dev: The device for which to allocate
1488 * @nvec: The number of interrupts to allocate
1489 * @type: Unused to allow simpler migration from the arch_XXX interfaces
1490 *
1491 * Returns:
1492 * A virtual interrupt number or an error code in case of failure
1493 */
1494int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
1495 int nvec, int type)
1496{
1497 return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
1498}
1499
1500/**
1501 * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
1502 * @domain: The interrupt domain
1503 * @dev: The device for which to free interrupts
1504 */
1505void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
1506{
1507 msi_domain_free_irqs(domain, &dev->dev);
1508}
1509
1510/**
1511 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
1512 * @fwnode: Optional fwnode of the interrupt controller
1513 * @info: MSI domain info
1514 * @parent: Parent irq domain
1515 *
1516 * Returns: A domain pointer or NULL in case of failure. If successful
1517 * the default PCI/MSI irqdomain pointer is updated.
1518 */
1519struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
1520 struct msi_domain_info *info, struct irq_domain *parent)
1521{
1522 struct irq_domain *domain;
1523
1524 mutex_lock(&pci_msi_domain_lock);
1525 if (pci_msi_default_domain) {
1526 pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
1527 domain = NULL;
1528 } else {
1529 domain = pci_msi_create_irq_domain(fwnode, info, parent);
1530 pci_msi_default_domain = domain;
1531 }
1532 mutex_unlock(&pci_msi_domain_lock);
1533
1534 return domain;
1535}
1536
1537static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) 1459static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
1538{ 1460{
1539 u32 *pa = data; 1461 u32 *pa = data;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1ccce1cd6aca..3e0516ee9eab 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -381,8 +381,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
381 id = pci_match_device(drv, pci_dev); 381 id = pci_match_device(drv, pci_dev);
382 if (id) 382 if (id)
383 error = pci_call_probe(drv, pci_dev, id); 383 error = pci_call_probe(drv, pci_dev, id);
384 if (error >= 0)
385 error = 0;
386 } 384 }
387 return error; 385 return error;
388} 386}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 066628776e1b..25d010d449a3 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -472,6 +472,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
472 const char *buf, size_t count) 472 const char *buf, size_t count)
473{ 473{
474 struct pci_dev *pdev = to_pci_dev(dev); 474 struct pci_dev *pdev = to_pci_dev(dev);
475 struct pci_sriov *iov = pdev->sriov;
475 int ret; 476 int ret;
476 u16 num_vfs; 477 u16 num_vfs;
477 478
@@ -482,38 +483,46 @@ static ssize_t sriov_numvfs_store(struct device *dev,
482 if (num_vfs > pci_sriov_get_totalvfs(pdev)) 483 if (num_vfs > pci_sriov_get_totalvfs(pdev))
483 return -ERANGE; 484 return -ERANGE;
484 485
486 mutex_lock(&iov->dev->sriov->lock);
487
485 if (num_vfs == pdev->sriov->num_VFs) 488 if (num_vfs == pdev->sriov->num_VFs)
486 return count; /* no change */ 489 goto exit;
487 490
488 /* is PF driver loaded w/callback */ 491 /* is PF driver loaded w/callback */
489 if (!pdev->driver || !pdev->driver->sriov_configure) { 492 if (!pdev->driver || !pdev->driver->sriov_configure) {
490 dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n"); 493 dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
491 return -ENOSYS; 494 ret = -ENOENT;
495 goto exit;
492 } 496 }
493 497
494 if (num_vfs == 0) { 498 if (num_vfs == 0) {
495 /* disable VFs */ 499 /* disable VFs */
496 ret = pdev->driver->sriov_configure(pdev, 0); 500 ret = pdev->driver->sriov_configure(pdev, 0);
497 if (ret < 0) 501 goto exit;
498 return ret;
499 return count;
500 } 502 }
501 503
502 /* enable VFs */ 504 /* enable VFs */
503 if (pdev->sriov->num_VFs) { 505 if (pdev->sriov->num_VFs) {
504 dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n", 506 dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
505 pdev->sriov->num_VFs, num_vfs); 507 pdev->sriov->num_VFs, num_vfs);
506 return -EBUSY; 508 ret = -EBUSY;
509 goto exit;
507 } 510 }
508 511
509 ret = pdev->driver->sriov_configure(pdev, num_vfs); 512 ret = pdev->driver->sriov_configure(pdev, num_vfs);
510 if (ret < 0) 513 if (ret < 0)
511 return ret; 514 goto exit;
512 515
513 if (ret != num_vfs) 516 if (ret != num_vfs)
514 dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n", 517 dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
515 num_vfs, ret); 518 num_vfs, ret);
516 519
520exit:
521 mutex_unlock(&iov->dev->sriov->lock);
522
523 if (ret < 0)
524 return ret;
525
517 return count; 526 return count;
518} 527}
519 528
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index cb17db242f30..8dd38e69d6f2 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -270,7 +270,7 @@ struct pci_sriov {
270 u16 driver_max_VFs; /* max num VFs driver supports */ 270 u16 driver_max_VFs; /* max num VFs driver supports */
271 struct pci_dev *dev; /* lowest numbered PF */ 271 struct pci_dev *dev; /* lowest numbered PF */
272 struct pci_dev *self; /* this PF */ 272 struct pci_dev *self; /* this PF */
273 struct mutex lock; /* lock for VF bus */ 273 struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
274 resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ 274 resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
275}; 275};
276 276
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 7ce77635e5ad..ac53edbc9613 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -71,6 +71,14 @@ config PCIEASPM_POWERSAVE
71 Enable PCI Express ASPM L0s and L1 where possible, even if the 71 Enable PCI Express ASPM L0s and L1 where possible, even if the
72 BIOS did not. 72 BIOS did not.
73 73
74config PCIEASPM_POWER_SUPERSAVE
75 bool "Power Supersave"
76 depends on PCIEASPM
77 help
78 Same as PCIEASPM_POWERSAVE, except it also enables L1 substates where
79 possible. This would result in higher power savings while staying in L1
80 where the components support it.
81
74config PCIEASPM_PERFORMANCE 82config PCIEASPM_PERFORMANCE
75 bool "Performance" 83 bool "Performance"
76 depends on PCIEASPM 84 depends on PCIEASPM
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..a9bcd56e41ed 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -30,8 +30,29 @@
30#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ 30#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ 31#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32#define ASPM_STATE_L1 (4) /* L1 state */ 32#define ASPM_STATE_L1 (4) /* L1 state */
33#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
34#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
35#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
36#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
37#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
38#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
39#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
40 ASPM_STATE_L1_2_MASK)
33#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) 41#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
34#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) 42#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
43 ASPM_STATE_L1SS)
44
45/*
46 * When L1 substates are enabled, the LTR L1.2 threshold is a timing parameter
47 * that decides whether L1.1 or L1.2 is entered (Refer PCIe spec for details).
48 * Not sure is there is a way to "calculate" this on the fly, but maybe we
49 * could turn it into a parameter in future. This value has been taken from
50 * the following files from Intel's coreboot (which is the only code I found
51 * to have used this):
52 * https://www.coreboot.org/pipermail/coreboot-gerrit/2015-March/021134.html
53 * https://review.coreboot.org/#/c/8832/
54 */
55#define LTR_L1_2_THRESHOLD_BITS ((1 << 21) | (1 << 23) | (1 << 30))
35 56
36struct aspm_latency { 57struct aspm_latency {
37 u32 l0s; /* L0s latency (nsec) */ 58 u32 l0s; /* L0s latency (nsec) */
@@ -40,6 +61,7 @@ struct aspm_latency {
40 61
41struct pcie_link_state { 62struct pcie_link_state {
42 struct pci_dev *pdev; /* Upstream component of the Link */ 63 struct pci_dev *pdev; /* Upstream component of the Link */
64 struct pci_dev *downstream; /* Downstream component, function 0 */
43 struct pcie_link_state *root; /* pointer to the root port link */ 65 struct pcie_link_state *root; /* pointer to the root port link */
44 struct pcie_link_state *parent; /* pointer to the parent Link state */ 66 struct pcie_link_state *parent; /* pointer to the parent Link state */
45 struct list_head sibling; /* node in link_list */ 67 struct list_head sibling; /* node in link_list */
@@ -47,11 +69,11 @@ struct pcie_link_state {
47 struct list_head link; /* node in parent's children list */ 69 struct list_head link; /* node in parent's children list */
48 70
49 /* ASPM state */ 71 /* ASPM state */
50 u32 aspm_support:3; /* Supported ASPM state */ 72 u32 aspm_support:7; /* Supported ASPM state */
51 u32 aspm_enabled:3; /* Enabled ASPM state */ 73 u32 aspm_enabled:7; /* Enabled ASPM state */
52 u32 aspm_capable:3; /* Capable ASPM state with latency */ 74 u32 aspm_capable:7; /* Capable ASPM state with latency */
53 u32 aspm_default:3; /* Default ASPM state by BIOS */ 75 u32 aspm_default:7; /* Default ASPM state by BIOS */
54 u32 aspm_disable:3; /* Disabled ASPM state */ 76 u32 aspm_disable:7; /* Disabled ASPM state */
55 77
56 /* Clock PM state */ 78 /* Clock PM state */
57 u32 clkpm_capable:1; /* Clock PM capable? */ 79 u32 clkpm_capable:1; /* Clock PM capable? */
@@ -66,6 +88,14 @@ struct pcie_link_state {
66 * has one slot under it, so at most there are 8 functions. 88 * has one slot under it, so at most there are 8 functions.
67 */ 89 */
68 struct aspm_latency acceptable[8]; 90 struct aspm_latency acceptable[8];
91
92 /* L1 PM Substate info */
93 struct {
94 u32 up_cap_ptr; /* L1SS cap ptr in upstream dev */
95 u32 dw_cap_ptr; /* L1SS cap ptr in downstream dev */
96 u32 ctl1; /* value to be programmed in ctl1 */
97 u32 ctl2; /* value to be programmed in ctl2 */
98 } l1ss;
69}; 99};
70 100
71static int aspm_disabled, aspm_force; 101static int aspm_disabled, aspm_force;
@@ -76,11 +106,14 @@ static LIST_HEAD(link_list);
76#define POLICY_DEFAULT 0 /* BIOS default setting */ 106#define POLICY_DEFAULT 0 /* BIOS default setting */
77#define POLICY_PERFORMANCE 1 /* high performance */ 107#define POLICY_PERFORMANCE 1 /* high performance */
78#define POLICY_POWERSAVE 2 /* high power saving */ 108#define POLICY_POWERSAVE 2 /* high power saving */
109#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
79 110
80#ifdef CONFIG_PCIEASPM_PERFORMANCE 111#ifdef CONFIG_PCIEASPM_PERFORMANCE
81static int aspm_policy = POLICY_PERFORMANCE; 112static int aspm_policy = POLICY_PERFORMANCE;
82#elif defined CONFIG_PCIEASPM_POWERSAVE 113#elif defined CONFIG_PCIEASPM_POWERSAVE
83static int aspm_policy = POLICY_POWERSAVE; 114static int aspm_policy = POLICY_POWERSAVE;
115#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
116static int aspm_policy = POLICY_POWER_SUPERSAVE;
84#else 117#else
85static int aspm_policy; 118static int aspm_policy;
86#endif 119#endif
@@ -88,7 +121,8 @@ static int aspm_policy;
88static const char *policy_str[] = { 121static const char *policy_str[] = {
89 [POLICY_DEFAULT] = "default", 122 [POLICY_DEFAULT] = "default",
90 [POLICY_PERFORMANCE] = "performance", 123 [POLICY_PERFORMANCE] = "performance",
91 [POLICY_POWERSAVE] = "powersave" 124 [POLICY_POWERSAVE] = "powersave",
125 [POLICY_POWER_SUPERSAVE] = "powersupersave"
92}; 126};
93 127
94#define LINK_RETRAIN_TIMEOUT HZ 128#define LINK_RETRAIN_TIMEOUT HZ
@@ -101,6 +135,9 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
101 return 0; 135 return 0;
102 case POLICY_POWERSAVE: 136 case POLICY_POWERSAVE:
103 /* Enable ASPM L0s/L1 */ 137 /* Enable ASPM L0s/L1 */
138 return (ASPM_STATE_L0S | ASPM_STATE_L1);
139 case POLICY_POWER_SUPERSAVE:
140 /* Enable Everything */
104 return ASPM_STATE_ALL; 141 return ASPM_STATE_ALL;
105 case POLICY_DEFAULT: 142 case POLICY_DEFAULT:
106 return link->aspm_default; 143 return link->aspm_default;
@@ -115,7 +152,8 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
115 /* Disable ASPM and Clock PM */ 152 /* Disable ASPM and Clock PM */
116 return 0; 153 return 0;
117 case POLICY_POWERSAVE: 154 case POLICY_POWERSAVE:
118 /* Disable Clock PM */ 155 case POLICY_POWER_SUPERSAVE:
156 /* Enable Clock PM */
119 return 1; 157 return 1;
120 case POLICY_DEFAULT: 158 case POLICY_DEFAULT:
121 return link->clkpm_default; 159 return link->clkpm_default;
@@ -278,11 +316,33 @@ static u32 calc_l1_acceptable(u32 encoding)
278 return (1000 << encoding); 316 return (1000 << encoding);
279} 317}
280 318
319/* Convert L1SS T_pwr encoding to usec */
320static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
321{
322 switch (scale) {
323 case 0:
324 return val * 2;
325 case 1:
326 return val * 10;
327 case 2:
328 return val * 100;
329 }
330 dev_err(&pdev->dev, "%s: Invalid T_PwrOn scale: %u\n",
331 __func__, scale);
332 return 0;
333}
334
281struct aspm_register_info { 335struct aspm_register_info {
282 u32 support:2; 336 u32 support:2;
283 u32 enabled:2; 337 u32 enabled:2;
284 u32 latency_encoding_l0s; 338 u32 latency_encoding_l0s;
285 u32 latency_encoding_l1; 339 u32 latency_encoding_l1;
340
341 /* L1 substates */
342 u32 l1ss_cap_ptr;
343 u32 l1ss_cap;
344 u32 l1ss_ctl1;
345 u32 l1ss_ctl2;
286}; 346};
287 347
288static void pcie_get_aspm_reg(struct pci_dev *pdev, 348static void pcie_get_aspm_reg(struct pci_dev *pdev,
@@ -297,6 +357,22 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
297 info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 357 info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
298 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16); 358 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
299 info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; 359 info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
360
361 /* Read L1 PM substate capabilities */
362 info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
363 info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
364 if (!info->l1ss_cap_ptr)
365 return;
366 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
367 &info->l1ss_cap);
368 if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
369 info->l1ss_cap = 0;
370 return;
371 }
372 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
373 &info->l1ss_ctl1);
374 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
375 &info->l1ss_ctl2);
300} 376}
301 377
302static void pcie_aspm_check_latency(struct pci_dev *endpoint) 378static void pcie_aspm_check_latency(struct pci_dev *endpoint)
@@ -327,6 +403,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
327 * Check L1 latency. 403 * Check L1 latency.
328 * Every switch on the path to root complex need 1 404 * Every switch on the path to root complex need 1
329 * more microsecond for L1. Spec doesn't mention L0s. 405 * more microsecond for L1. Spec doesn't mention L0s.
406 *
407 * The exit latencies for L1 substates are not advertised
408 * by a device. Since the spec also doesn't mention a way
409 * to determine max latencies introduced by enabling L1
410 * substates on the components, it is not clear how to do
411 * a L1 substate exit latency check. We assume that the
412 * L1 exit latencies advertised by a device include L1
413 * substate latencies (and hence do not do any check).
330 */ 414 */
331 latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1); 415 latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
332 if ((link->aspm_capable & ASPM_STATE_L1) && 416 if ((link->aspm_capable & ASPM_STATE_L1) &&
@@ -338,6 +422,60 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
338 } 422 }
339} 423}
340 424
425/*
426 * The L1 PM substate capability is only implemented in function 0 in a
427 * multi function device.
428 */
429static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
430{
431 struct pci_dev *child;
432
433 list_for_each_entry(child, &linkbus->devices, bus_list)
434 if (PCI_FUNC(child->devfn) == 0)
435 return child;
436 return NULL;
437}
438
439/* Calculate L1.2 PM substate timing parameters */
440static void aspm_calc_l1ss_info(struct pcie_link_state *link,
441 struct aspm_register_info *upreg,
442 struct aspm_register_info *dwreg)
443{
444 u32 val1, val2, scale1, scale2;
445
446 link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
447 link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
448 link->l1ss.ctl1 = link->l1ss.ctl2 = 0;
449
450 if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
451 return;
452
453 /* Choose the greater of the two T_cmn_mode_rstr_time */
454 val1 = (upreg->l1ss_cap >> 8) & 0xFF;
455 val2 = (upreg->l1ss_cap >> 8) & 0xFF;
456 if (val1 > val2)
457 link->l1ss.ctl1 |= val1 << 8;
458 else
459 link->l1ss.ctl1 |= val2 << 8;
460 /*
461 * We currently use LTR L1.2 threshold to be fixed constant picked from
462 * Intel's coreboot.
463 */
464 link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;
465
466 /* Choose the greater of the two T_pwr_on */
467 val1 = (upreg->l1ss_cap >> 19) & 0x1F;
468 scale1 = (upreg->l1ss_cap >> 16) & 0x03;
469 val2 = (dwreg->l1ss_cap >> 19) & 0x1F;
470 scale2 = (dwreg->l1ss_cap >> 16) & 0x03;
471
472 if (calc_l1ss_pwron(link->pdev, scale1, val1) >
473 calc_l1ss_pwron(link->downstream, scale2, val2))
474 link->l1ss.ctl2 |= scale1 | (val1 << 3);
475 else
476 link->l1ss.ctl2 |= scale2 | (val2 << 3);
477}
478
341static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) 479static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
342{ 480{
343 struct pci_dev *child, *parent = link->pdev; 481 struct pci_dev *child, *parent = link->pdev;
@@ -353,8 +491,9 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
353 491
354 /* Get upstream/downstream components' register state */ 492 /* Get upstream/downstream components' register state */
355 pcie_get_aspm_reg(parent, &upreg); 493 pcie_get_aspm_reg(parent, &upreg);
356 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); 494 child = pci_function_0(linkbus);
357 pcie_get_aspm_reg(child, &dwreg); 495 pcie_get_aspm_reg(child, &dwreg);
496 link->downstream = child;
358 497
359 /* 498 /*
360 * If ASPM not supported, don't mess with the clocks and link, 499 * If ASPM not supported, don't mess with the clocks and link,
@@ -397,6 +536,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
397 link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1); 536 link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
398 link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1); 537 link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
399 538
539 /* Setup L1 substate */
540 if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
541 link->aspm_support |= ASPM_STATE_L1_1;
542 if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
543 link->aspm_support |= ASPM_STATE_L1_2;
544 if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
545 link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
546 if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
547 link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
548
549 if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
550 link->aspm_enabled |= ASPM_STATE_L1_1;
551 if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
552 link->aspm_enabled |= ASPM_STATE_L1_2;
553 if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
554 link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
555 if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
556 link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
557
558 if (link->aspm_support & ASPM_STATE_L1SS)
559 aspm_calc_l1ss_info(link, &upreg, &dwreg);
560
400 /* Save default state */ 561 /* Save default state */
401 link->aspm_default = link->aspm_enabled; 562 link->aspm_default = link->aspm_enabled;
402 563
@@ -435,6 +596,92 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
435 } 596 }
436} 597}
437 598
599static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
600 u32 clear, u32 set)
601{
602 u32 val;
603
604 pci_read_config_dword(pdev, pos, &val);
605 val &= ~clear;
606 val |= set;
607 pci_write_config_dword(pdev, pos, val);
608}
609
610/* Configure the ASPM L1 substates */
611static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
612{
613 u32 val, enable_req;
614 struct pci_dev *child = link->downstream, *parent = link->pdev;
615 u32 up_cap_ptr = link->l1ss.up_cap_ptr;
616 u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;
617
618 enable_req = (link->aspm_enabled ^ state) & state;
619
620 /*
621 * Here are the rules specified in the PCIe spec for enabling L1SS:
622 * - When enabling L1.x, enable bit at parent first, then at child
623 * - When disabling L1.x, disable bit at child first, then at parent
624 * - When enabling ASPM L1.x, need to disable L1
625 * (at child followed by parent).
626 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
627 * parameters
628 *
629 * To keep it simple, disable all L1SS bits first, and later enable
630 * what is needed.
631 */
632
633 /* Disable all L1 substates */
634 pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
635 PCI_L1SS_CTL1_L1SS_MASK, 0);
636 pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
637 PCI_L1SS_CTL1_L1SS_MASK, 0);
638 /*
639 * If needed, disable L1, and it gets enabled later
640 * in pcie_config_aspm_link().
641 */
642 if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
643 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
644 PCI_EXP_LNKCTL_ASPM_L1, 0);
645 pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
646 PCI_EXP_LNKCTL_ASPM_L1, 0);
647 }
648
649 if (enable_req & ASPM_STATE_L1_2_MASK) {
650
651 /* Program T_pwr_on in both ports */
652 pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
653 link->l1ss.ctl2);
654 pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
655 link->l1ss.ctl2);
656
657 /* Program T_cmn_mode in parent */
658 pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
659 0xFF00, link->l1ss.ctl1);
660
661 /* Program LTR L1.2 threshold in both ports */
662 pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1,
663 0xE3FF0000, link->l1ss.ctl1);
664 pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
665 0xE3FF0000, link->l1ss.ctl1);
666 }
667
668 val = 0;
669 if (state & ASPM_STATE_L1_1)
670 val |= PCI_L1SS_CTL1_ASPM_L1_1;
671 if (state & ASPM_STATE_L1_2)
672 val |= PCI_L1SS_CTL1_ASPM_L1_2;
673 if (state & ASPM_STATE_L1_1_PCIPM)
674 val |= PCI_L1SS_CTL1_PCIPM_L1_1;
675 if (state & ASPM_STATE_L1_2_PCIPM)
676 val |= PCI_L1SS_CTL1_PCIPM_L1_2;
677
678 /* Enable what we need to enable */
679 pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
680 PCI_L1SS_CAP_L1_PM_SS, val);
681 pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
682 PCI_L1SS_CAP_L1_PM_SS, val);
683}
684
438static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) 685static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
439{ 686{
440 pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, 687 pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
@@ -444,11 +691,23 @@ static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
444static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) 691static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
445{ 692{
446 u32 upstream = 0, dwstream = 0; 693 u32 upstream = 0, dwstream = 0;
447 struct pci_dev *child, *parent = link->pdev; 694 struct pci_dev *child = link->downstream, *parent = link->pdev;
448 struct pci_bus *linkbus = parent->subordinate; 695 struct pci_bus *linkbus = parent->subordinate;
449 696
450 /* Nothing to do if the link is already in the requested state */ 697 /* Enable only the states that were not explicitly disabled */
451 state &= (link->aspm_capable & ~link->aspm_disable); 698 state &= (link->aspm_capable & ~link->aspm_disable);
699
700 /* Can't enable any substates if L1 is not enabled */
701 if (!(state & ASPM_STATE_L1))
702 state &= ~ASPM_STATE_L1SS;
703
704 /* Spec says both ports must be in D0 before enabling PCI PM substates*/
705 if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
706 state &= ~ASPM_STATE_L1_SS_PCIPM;
707 state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
708 }
709
710 /* Nothing to do if the link is already in the requested state */
452 if (link->aspm_enabled == state) 711 if (link->aspm_enabled == state)
453 return; 712 return;
454 /* Convert ASPM state to upstream/downstream ASPM register state */ 713 /* Convert ASPM state to upstream/downstream ASPM register state */
@@ -460,6 +719,10 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
460 upstream |= PCI_EXP_LNKCTL_ASPM_L1; 719 upstream |= PCI_EXP_LNKCTL_ASPM_L1;
461 dwstream |= PCI_EXP_LNKCTL_ASPM_L1; 720 dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
462 } 721 }
722
723 if (link->aspm_capable & ASPM_STATE_L1SS)
724 pcie_config_aspm_l1ss(link, state);
725
463 /* 726 /*
464 * Spec 2.0 suggests all functions should be configured the 727 * Spec 2.0 suggests all functions should be configured the
465 * same setting for ASPM. Enabling ASPM L1 should be done in 728 * same setting for ASPM. Enabling ASPM L1 should be done in
@@ -612,7 +875,8 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
612 * the BIOS's expectation, we'll do so once pci_enable_device() is 875 * the BIOS's expectation, we'll do so once pci_enable_device() is
613 * called. 876 * called.
614 */ 877 */
615 if (aspm_policy != POLICY_POWERSAVE) { 878 if (aspm_policy != POLICY_POWERSAVE &&
879 aspm_policy != POLICY_POWER_SUPERSAVE) {
616 pcie_config_aspm_path(link); 880 pcie_config_aspm_path(link);
617 pcie_set_clkpm(link, policy_to_clkpm_state(link)); 881 pcie_set_clkpm(link, policy_to_clkpm_state(link));
618 } 882 }
@@ -712,7 +976,8 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
712 if (aspm_disabled || !link) 976 if (aspm_disabled || !link)
713 return; 977 return;
714 978
715 if (aspm_policy != POLICY_POWERSAVE) 979 if (aspm_policy != POLICY_POWERSAVE &&
980 aspm_policy != POLICY_POWER_SUPERSAVE)
716 return; 981 return;
717 982
718 down_read(&pci_bus_sem); 983 down_read(&pci_bus_sem);
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 9811b14d9ad8..d4d70ef4a2d7 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -19,8 +19,28 @@ struct dpc_dev {
19 struct pcie_device *dev; 19 struct pcie_device *dev;
20 struct work_struct work; 20 struct work_struct work;
21 int cap_pos; 21 int cap_pos;
22 bool rp;
22}; 23};
23 24
25static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
26{
27 unsigned long timeout = jiffies + HZ;
28 struct pci_dev *pdev = dpc->dev->port;
29 u16 status;
30
31 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
32 while (status & PCI_EXP_DPC_RP_BUSY &&
33 !time_after(jiffies, timeout)) {
34 msleep(10);
35 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
36 }
37 if (status & PCI_EXP_DPC_RP_BUSY) {
38 dev_warn(&pdev->dev, "DPC root port still busy\n");
39 return -EBUSY;
40 }
41 return 0;
42}
43
24static void dpc_wait_link_inactive(struct pci_dev *pdev) 44static void dpc_wait_link_inactive(struct pci_dev *pdev)
25{ 45{
26 unsigned long timeout = jiffies + HZ; 46 unsigned long timeout = jiffies + HZ;
@@ -33,7 +53,7 @@ static void dpc_wait_link_inactive(struct pci_dev *pdev)
33 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); 53 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
34 } 54 }
35 if (lnk_status & PCI_EXP_LNKSTA_DLLLA) 55 if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
36 dev_warn(&pdev->dev, "Link state not disabled for DPC event"); 56 dev_warn(&pdev->dev, "Link state not disabled for DPC event\n");
37} 57}
38 58
39static void interrupt_event_handler(struct work_struct *work) 59static void interrupt_event_handler(struct work_struct *work)
@@ -52,6 +72,8 @@ static void interrupt_event_handler(struct work_struct *work)
52 pci_unlock_rescan_remove(); 72 pci_unlock_rescan_remove();
53 73
54 dpc_wait_link_inactive(pdev); 74 dpc_wait_link_inactive(pdev);
75 if (dpc->rp && dpc_wait_rp_inactive(dpc))
76 return;
55 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, 77 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS,
56 PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT); 78 PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
57} 79}
@@ -73,11 +95,15 @@ static irqreturn_t dpc_irq(int irq, void *context)
73 95
74 if (status & PCI_EXP_DPC_STATUS_TRIGGER) { 96 if (status & PCI_EXP_DPC_STATUS_TRIGGER) {
75 u16 reason = (status >> 1) & 0x3; 97 u16 reason = (status >> 1) & 0x3;
98 u16 ext_reason = (status >> 5) & 0x3;
76 99
77 dev_warn(&dpc->dev->device, "DPC %s triggered, remove downstream devices\n", 100 dev_warn(&dpc->dev->device, "DPC %s detected, remove downstream devices\n",
78 (reason == 0) ? "unmasked uncorrectable error" : 101 (reason == 0) ? "unmasked uncorrectable error" :
79 (reason == 1) ? "ERR_NONFATAL" : 102 (reason == 1) ? "ERR_NONFATAL" :
80 (reason == 2) ? "ERR_FATAL" : "extended error"); 103 (reason == 2) ? "ERR_FATAL" :
104 (ext_reason == 0) ? "RP PIO error" :
105 (ext_reason == 1) ? "software trigger" :
106 "reserved error");
81 schedule_work(&dpc->work); 107 schedule_work(&dpc->work);
82 } 108 }
83 return IRQ_HANDLED; 109 return IRQ_HANDLED;
@@ -111,6 +137,8 @@ static int dpc_probe(struct pcie_device *dev)
111 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); 137 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
112 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); 138 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
113 139
140 dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
141
114 ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; 142 ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
115 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); 143 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
116 144
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 9698289f105c..cea504f6f478 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,52 +44,16 @@ static void release_pcie_device(struct device *dev)
44} 44}
45 45
46/** 46/**
47 * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
48 * @entries: Array of MSI-X entries
49 * @new_entry: Index of the entry to add to the array
50 * @nr_entries: Number of entries already in the array
51 *
52 * Return value: Position of the added entry in the array
53 */
54static int pcie_port_msix_add_entry(
55 struct msix_entry *entries, int new_entry, int nr_entries)
56{
57 int j;
58
59 for (j = 0; j < nr_entries; j++)
60 if (entries[j].entry == new_entry)
61 return j;
62
63 entries[j].entry = new_entry;
64 return j;
65}
66
67/**
68 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port 47 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
69 * @dev: PCI Express port to handle 48 * @dev: PCI Express port to handle
70 * @vectors: Array of interrupt vectors to populate 49 * @irqs: Array of interrupt vectors to populate
71 * @mask: Bitmask of port capabilities returned by get_port_device_capability() 50 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
72 * 51 *
73 * Return value: 0 on success, error code on failure 52 * Return value: 0 on success, error code on failure
74 */ 53 */
75static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) 54static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
76{ 55{
77 struct msix_entry *msix_entries; 56 int nr_entries, entry, nvec = 0;
78 int idx[PCIE_PORT_DEVICE_MAXSERVICES];
79 int nr_entries, status, pos, i, nvec;
80 u16 reg16;
81 u32 reg32;
82
83 nr_entries = pci_msix_vec_count(dev);
84 if (nr_entries < 0)
85 return nr_entries;
86 BUG_ON(!nr_entries);
87 if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
88 nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
89
90 msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
91 if (!msix_entries)
92 return -ENOMEM;
93 57
94 /* 58 /*
95 * Allocate as many entries as the port wants, so that we can check 59 * Allocate as many entries as the port wants, so that we can check
@@ -97,20 +61,13 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
97 * equal to the number of entries this port actually uses, we'll happily 61 * equal to the number of entries this port actually uses, we'll happily
98 * go through without any tricks. 62 * go through without any tricks.
99 */ 63 */
100 for (i = 0; i < nr_entries; i++) 64 nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
101 msix_entries[i].entry = i; 65 PCI_IRQ_MSIX);
102 66 if (nr_entries < 0)
103 status = pci_enable_msix_exact(dev, msix_entries, nr_entries); 67 return nr_entries;
104 if (status)
105 goto Exit;
106
107 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
108 idx[i] = -1;
109 status = -EIO;
110 nvec = 0;
111 68
112 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) { 69 if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
113 int entry; 70 u16 reg16;
114 71
115 /* 72 /*
116 * The code below follows the PCI Express Base Specification 2.0 73 * The code below follows the PCI Express Base Specification 2.0
@@ -125,18 +82,16 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
125 pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16); 82 pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
126 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; 83 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
127 if (entry >= nr_entries) 84 if (entry >= nr_entries)
128 goto Error; 85 goto out_free_irqs;
129 86
130 i = pcie_port_msix_add_entry(msix_entries, entry, nvec); 87 irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
131 if (i == nvec) 88 irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
132 nvec++;
133 89
134 idx[PCIE_PORT_SERVICE_PME_SHIFT] = i; 90 nvec = max(nvec, entry + 1);
135 idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
136 } 91 }
137 92
138 if (mask & PCIE_PORT_SERVICE_AER) { 93 if (mask & PCIE_PORT_SERVICE_AER) {
139 int entry; 94 u32 reg32, pos;
140 95
141 /* 96 /*
142 * The code below follows Section 7.10.10 of the PCI Express 97 * The code below follows Section 7.10.10 of the PCI Express
@@ -151,13 +106,11 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
151 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32); 106 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
152 entry = reg32 >> 27; 107 entry = reg32 >> 27;
153 if (entry >= nr_entries) 108 if (entry >= nr_entries)
154 goto Error; 109 goto out_free_irqs;
155 110
156 i = pcie_port_msix_add_entry(msix_entries, entry, nvec); 111 irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
157 if (i == nvec)
158 nvec++;
159 112
160 idx[PCIE_PORT_SERVICE_AER_SHIFT] = i; 113 nvec = max(nvec, entry + 1);
161 } 114 }
162 115
163 /* 116 /*
@@ -165,41 +118,39 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
165 * what we have. Otherwise, the port has some extra entries not for the 118 * what we have. Otherwise, the port has some extra entries not for the
166 * services we know and we need to work around that. 119 * services we know and we need to work around that.
167 */ 120 */
168 if (nvec == nr_entries) { 121 if (nvec != nr_entries) {
169 status = 0;
170 } else {
171 /* Drop the temporary MSI-X setup */ 122 /* Drop the temporary MSI-X setup */
172 pci_disable_msix(dev); 123 pci_free_irq_vectors(dev);
173 124
174 /* Now allocate the MSI-X vectors for real */ 125 /* Now allocate the MSI-X vectors for real */
175 status = pci_enable_msix_exact(dev, msix_entries, nvec); 126 nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
176 if (status) 127 PCI_IRQ_MSIX);
177 goto Exit; 128 if (nr_entries < 0)
129 return nr_entries;
178 } 130 }
179 131
180 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) 132 return 0;
181 vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
182
183 Exit:
184 kfree(msix_entries);
185 return status;
186 133
187 Error: 134out_free_irqs:
188 pci_disable_msix(dev); 135 pci_free_irq_vectors(dev);
189 goto Exit; 136 return -EIO;
190} 137}
191 138
192/** 139/**
193 * init_service_irqs - initialize irqs for PCI Express port services 140 * pcie_init_service_irqs - initialize irqs for PCI Express port services
194 * @dev: PCI Express port to handle 141 * @dev: PCI Express port to handle
195 * @irqs: Array of irqs to populate 142 * @irqs: Array of irqs to populate
196 * @mask: Bitmask of port capabilities returned by get_port_device_capability() 143 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
197 * 144 *
198 * Return value: Interrupt mode associated with the port 145 * Return value: Interrupt mode associated with the port
199 */ 146 */
200static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) 147static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
201{ 148{
202 int i, irq = -1; 149 unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
150 int ret, i;
151
152 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
153 irqs[i] = -1;
203 154
204 /* 155 /*
205 * If MSI cannot be used for PCIe PME or hotplug, we have to use 156 * If MSI cannot be used for PCIe PME or hotplug, we have to use
@@ -207,41 +158,25 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
207 */ 158 */
208 if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || 159 if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
209 ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { 160 ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
210 if (dev->irq) 161 flags &= ~PCI_IRQ_MSI;
211 irq = dev->irq; 162 } else {
212 goto no_msi; 163 /* Try to use MSI-X if supported */
164 if (!pcie_port_enable_msix(dev, irqs, mask))
165 return 0;
213 } 166 }
214 167
215 /* Try to use MSI-X if supported */ 168 ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
216 if (!pcie_port_enable_msix(dev, irqs, mask)) 169 if (ret < 0)
217 return 0; 170 return -ENODEV;
218
219 /*
220 * We're not going to use MSI-X, so try MSI and fall back to INTx.
221 * If neither MSI/MSI-X nor INTx available, try other interrupt. On
222 * some platforms, root port doesn't support MSI/MSI-X/INTx in RC mode.
223 */
224 if (!pci_enable_msi(dev) || dev->irq)
225 irq = dev->irq;
226 171
227 no_msi: 172 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
228 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) 173 if (i != PCIE_PORT_SERVICE_VC_SHIFT)
229 irqs[i] = irq; 174 irqs[i] = pci_irq_vector(dev, 0);
230 irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; 175 }
231 176
232 if (irq < 0)
233 return -ENODEV;
234 return 0; 177 return 0;
235} 178}
236 179
237static void cleanup_service_irqs(struct pci_dev *dev)
238{
239 if (dev->msix_enabled)
240 pci_disable_msix(dev);
241 else if (dev->msi_enabled)
242 pci_disable_msi(dev);
243}
244
245/** 180/**
246 * get_port_device_capability - discover capabilities of a PCI Express port 181 * get_port_device_capability - discover capabilities of a PCI Express port
247 * @dev: PCI Express port to examine 182 * @dev: PCI Express port to examine
@@ -378,7 +313,7 @@ int pcie_port_device_register(struct pci_dev *dev)
378 * that can be used in the absence of irqs. Allow them to determine 313 * that can be used in the absence of irqs. Allow them to determine
379 * if that is to be used. 314 * if that is to be used.
380 */ 315 */
381 status = init_service_irqs(dev, irqs, capabilities); 316 status = pcie_init_service_irqs(dev, irqs, capabilities);
382 if (status) { 317 if (status) {
383 capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP; 318 capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
384 if (!capabilities) 319 if (!capabilities)
@@ -401,7 +336,7 @@ int pcie_port_device_register(struct pci_dev *dev)
401 return 0; 336 return 0;
402 337
403error_cleanup_irqs: 338error_cleanup_irqs:
404 cleanup_service_irqs(dev); 339 pci_free_irq_vectors(dev);
405error_disable: 340error_disable:
406 pci_disable_device(dev); 341 pci_disable_device(dev);
407 return status; 342 return status;
@@ -469,7 +404,7 @@ static int remove_iter(struct device *dev, void *data)
469void pcie_port_device_remove(struct pci_dev *dev) 404void pcie_port_device_remove(struct pci_dev *dev)
470{ 405{
471 device_for_each_child(&dev->dev, NULL, remove_iter); 406 device_for_each_child(&dev->dev, NULL, remove_iter);
472 cleanup_service_irqs(dev); 407 pci_free_irq_vectors(dev);
473 pci_disable_device(dev); 408 pci_disable_device(dev);
474} 409}
475 410
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..3abc94212197 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1554,8 +1554,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1554 1554
1555static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) 1555static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1556{ 1556{
1557 if (hpp) 1557 int pos;
1558 dev_warn(&dev->dev, "PCI-X settings not supported\n"); 1558
1559 if (!hpp)
1560 return;
1561
1562 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1563 if (!pos)
1564 return;
1565
1566 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1559} 1567}
1560 1568
1561static bool pcie_root_rcb_set(struct pci_dev *dev) 1569static bool pcie_root_rcb_set(struct pci_dev *dev)
@@ -1581,6 +1589,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1581 if (!hpp) 1589 if (!hpp)
1582 return; 1590 return;
1583 1591
1592 if (!pci_is_pcie(dev))
1593 return;
1594
1584 if (hpp->revision > 1) { 1595 if (hpp->revision > 1) {
1585 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", 1596 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1586 hpp->revision); 1597 hpp->revision);
@@ -1650,12 +1661,30 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1650 */ 1661 */
1651} 1662}
1652 1663
1664static void pci_configure_extended_tags(struct pci_dev *dev)
1665{
1666 u32 dev_cap;
1667 int ret;
1668
1669 if (!pci_is_pcie(dev))
1670 return;
1671
1672 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
1673 if (ret)
1674 return;
1675
1676 if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
1677 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1678 PCI_EXP_DEVCTL_EXT_TAG);
1679}
1680
1653static void pci_configure_device(struct pci_dev *dev) 1681static void pci_configure_device(struct pci_dev *dev)
1654{ 1682{
1655 struct hotplug_params hpp; 1683 struct hotplug_params hpp;
1656 int ret; 1684 int ret;
1657 1685
1658 pci_configure_mps(dev); 1686 pci_configure_mps(dev);
1687 pci_configure_extended_tags(dev);
1659 1688
1660 memset(&hpp, 0, sizeof(hpp)); 1689 memset(&hpp, 0, sizeof(hpp));
1661 ret = pci_get_hp_params(dev, &hpp); 1690 ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1800befa8b8b..6079a4ddf132 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1634,6 +1634,7 @@ static void quirk_pcie_mch(struct pci_dev *pdev)
1634DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); 1634DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
1635DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); 1635DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
1636DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); 1636DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
1637DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
1637 1638
1638 1639
1639/* 1640/*
@@ -2239,6 +2240,27 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2239 PCI_DEVICE_ID_TIGON3_5719, 2240 PCI_DEVICE_ID_TIGON3_5719,
2240 quirk_brcm_5719_limit_mrrs); 2241 quirk_brcm_5719_limit_mrrs);
2241 2242
2243#ifdef CONFIG_PCIE_IPROC_PLATFORM
2244static void quirk_paxc_bridge(struct pci_dev *pdev)
2245{
2246 /* The PCI config space is shared with the PAXC root port and the first
2247 * Ethernet device. So, we need to workaround this by telling the PCI
2248 * code that the bridge is not an Ethernet device.
2249 */
2250 if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2251 pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
2252
2253 /* MPSS is not being set properly (as it is currently 0). This is
2254 * because that area of the PCI config space is hard coded to zero, and
2255 * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
2256 * so that the MPS can be set to the real max value.
2257 */
2258 pdev->pcie_mpss = 2;
2259}
2260DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
2261DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
2262#endif
2263
2242/* Originally in EDAC sources for i82875P: 2264/* Originally in EDAC sources for i82875P:
2243 * Intel tells BIOS developers to hide device 6 which 2265 * Intel tells BIOS developers to hide device 6 which
2244 * configures the overflow device access containing 2266 * configures the overflow device access containing
@@ -4150,15 +4172,35 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
4150 * 4172 *
4151 * N.B. This doesn't fix what lspci shows. 4173 * N.B. This doesn't fix what lspci shows.
4152 * 4174 *
4175 * The 100 series chipset specification update includes this as errata #23[3].
4176 *
4177 * The 200 series chipset (Union Point) has the same bug according to the
4178 * specification update (Intel 200 Series Chipset Family Platform Controller
4179 * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001,
4180 * Errata 22)[4]. Per the datasheet[5], root port PCI Device IDs for this
4181 * chipset include:
4182 *
4183 * 0xa290-0xa29f PCI Express Root port #{0-16}
4184 * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
4185 *
4153 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html 4186 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
4154 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html 4187 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
4188 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
4189 * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
4190 * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
4155 */ 4191 */
4156static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) 4192static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4157{ 4193{
4158 return pci_is_pcie(dev) && 4194 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4159 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT && 4195 return false;
4160 ((dev->device & ~0xf) == 0xa110 || 4196
4161 (dev->device >= 0xa167 && dev->device <= 0xa16a)); 4197 switch (dev->device) {
4198 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
4199 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
4200 return true;
4201 }
4202
4203 return false;
4162} 4204}
4163 4205
4164#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4) 4206#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f30ca75b5b6c..cb389277df41 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -105,17 +105,8 @@ static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
105 struct pci_dev_resource *dev_res; 105 struct pci_dev_resource *dev_res;
106 106
107 list_for_each_entry(dev_res, head, list) { 107 list_for_each_entry(dev_res, head, list) {
108 if (dev_res->res == res) { 108 if (dev_res->res == res)
109 int idx = res - &dev_res->dev->resource[0];
110
111 dev_printk(KERN_DEBUG, &dev_res->dev->dev,
112 "res[%d]=%pR res_to_dev_res add_size %llx min_align %llx\n",
113 idx, dev_res->res,
114 (unsigned long long)dev_res->add_size,
115 (unsigned long long)dev_res->min_align);
116
117 return dev_res; 109 return dev_res;
118 }
119 } 110 }
120 111
121 return NULL; 112 return NULL;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index e8eb7f225a88..bbad035c60cc 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -331,6 +331,14 @@ config PHY_EXYNOS5_USBDRD
331 This driver provides PHY interface for USB 3.0 DRD controller 331 This driver provides PHY interface for USB 3.0 DRD controller
332 present on Exynos5 SoC series. 332 present on Exynos5 SoC series.
333 333
334config PHY_EXYNOS_PCIE
335 bool "Exynos PCIe PHY driver"
336 depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
337 select GENERIC_PHY
338 help
339 Enable PCIe PHY support for Exynos SoC series.
340 This driver provides PHY interface for Exynos PCIe controller.
341
334config PHY_PISTACHIO_USB 342config PHY_PISTACHIO_USB
335 tristate "IMG Pistachio USB2.0 PHY driver" 343 tristate "IMG Pistachio USB2.0 PHY driver"
336 depends on MACH_PISTACHIO 344 depends on MACH_PISTACHIO
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 65eb2f436a41..081aeb4efd13 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -37,6 +37,7 @@ phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
37phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o 37phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
38phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o 38phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o
39obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o 39obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o
40obj-$(CONFIG_PHY_EXYNOS_PCIE) += phy-exynos-pcie.o
40obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o 41obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
41obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o 42obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
42obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o 43obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c
new file mode 100644
index 000000000000..4f60b83641d5
--- /dev/null
+++ b/drivers/phy/phy-exynos-pcie.c
@@ -0,0 +1,285 @@
1/*
2 * Samsung EXYNOS SoC series PCIe PHY driver
3 *
4 * Phy provider for PCIe controller on Exynos SoC series
5 *
6 * Copyright (C) 2017 Samsung Electronics Co., Ltd.
7 * Jaehoon Chung <jh80.chung@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/iopoll.h>
17#include <linux/mfd/syscon.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/phy/phy.h>
24#include <linux/regmap.h>
25
26/* PCIe Purple registers */
27#define PCIE_PHY_GLOBAL_RESET 0x000
28#define PCIE_PHY_COMMON_RESET 0x004
29#define PCIE_PHY_CMN_REG 0x008
30#define PCIE_PHY_MAC_RESET 0x00c
31#define PCIE_PHY_PLL_LOCKED 0x010
32#define PCIE_PHY_TRSVREG_RESET 0x020
33#define PCIE_PHY_TRSV_RESET 0x024
34
35/* PCIe PHY registers */
36#define PCIE_PHY_IMPEDANCE 0x004
37#define PCIE_PHY_PLL_DIV_0 0x008
38#define PCIE_PHY_PLL_BIAS 0x00c
39#define PCIE_PHY_DCC_FEEDBACK 0x014
40#define PCIE_PHY_PLL_DIV_1 0x05c
41#define PCIE_PHY_COMMON_POWER 0x064
42#define PCIE_PHY_COMMON_PD_CMN BIT(3)
43#define PCIE_PHY_TRSV0_EMP_LVL 0x084
44#define PCIE_PHY_TRSV0_DRV_LVL 0x088
45#define PCIE_PHY_TRSV0_RXCDR 0x0ac
46#define PCIE_PHY_TRSV0_POWER 0x0c4
47#define PCIE_PHY_TRSV0_PD_TSV BIT(7)
48#define PCIE_PHY_TRSV0_LVCC 0x0dc
49#define PCIE_PHY_TRSV1_EMP_LVL 0x144
50#define PCIE_PHY_TRSV1_RXCDR 0x16c
51#define PCIE_PHY_TRSV1_POWER 0x184
52#define PCIE_PHY_TRSV1_PD_TSV BIT(7)
53#define PCIE_PHY_TRSV1_LVCC 0x19c
54#define PCIE_PHY_TRSV2_EMP_LVL 0x204
55#define PCIE_PHY_TRSV2_RXCDR 0x22c
56#define PCIE_PHY_TRSV2_POWER 0x244
57#define PCIE_PHY_TRSV2_PD_TSV BIT(7)
58#define PCIE_PHY_TRSV2_LVCC 0x25c
59#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
60#define PCIE_PHY_TRSV3_RXCDR 0x2ec
61#define PCIE_PHY_TRSV3_POWER 0x304
62#define PCIE_PHY_TRSV3_PD_TSV BIT(7)
63#define PCIE_PHY_TRSV3_LVCC 0x31c
64
65struct exynos_pcie_phy_data {
66 const struct phy_ops *ops;
67};
68
69/* For Exynos pcie phy */
70struct exynos_pcie_phy {
71 const struct exynos_pcie_phy_data *drv_data;
72 void __iomem *phy_base;
73 void __iomem *blk_base; /* For exynos5440 */
74};
75
76static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset)
77{
78 writel(val, base + offset);
79}
80
81static u32 exynos_pcie_phy_readl(void __iomem *base, u32 offset)
82{
83 return readl(base + offset);
84}
85
86/* For Exynos5440 specific functions */
87static int exynos5440_pcie_phy_init(struct phy *phy)
88{
89 struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
90
91 /* DCC feedback control off */
92 exynos_pcie_phy_writel(ep->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);
93
94 /* set TX/RX impedance */
95 exynos_pcie_phy_writel(ep->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);
96
97 /* set 50Mhz PHY clock */
98 exynos_pcie_phy_writel(ep->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
99 exynos_pcie_phy_writel(ep->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);
100
101 /* set TX Differential output for lane 0 */
102 exynos_pcie_phy_writel(ep->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
103
104 /* set TX Pre-emphasis Level Control for lane 0 to minimum */
105 exynos_pcie_phy_writel(ep->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
106
107 /* set RX clock and data recovery bandwidth */
108 exynos_pcie_phy_writel(ep->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
109 exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
110 exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
111 exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
112 exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);
113
114 /* change TX Pre-emphasis Level Control for lanes */
115 exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
116 exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
117 exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
118 exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
119
120 /* set LVCC */
121 exynos_pcie_phy_writel(ep->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
122 exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
123 exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
124 exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
125
126 /* pulse for common reset */
127 exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_COMMON_RESET);
128 udelay(500);
129 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET);
130
131 return 0;
132}
133
134static int exynos5440_pcie_phy_power_on(struct phy *phy)
135{
136 struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
137 u32 val;
138
139 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET);
140 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_CMN_REG);
141 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSVREG_RESET);
142 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSV_RESET);
143
144 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
145 val &= ~PCIE_PHY_COMMON_PD_CMN;
146 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);
147
148 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
149 val &= ~PCIE_PHY_TRSV0_PD_TSV;
150 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);
151
152 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
153 val &= ~PCIE_PHY_TRSV1_PD_TSV;
154 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);
155
156 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
157 val &= ~PCIE_PHY_TRSV2_PD_TSV;
158 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);
159
160 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
161 val &= ~PCIE_PHY_TRSV3_PD_TSV;
162 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);
163
164 return 0;
165}
166
167static int exynos5440_pcie_phy_power_off(struct phy *phy)
168{
169 struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
170 u32 val;
171
172 if (readl_poll_timeout(ep->phy_base + PCIE_PHY_PLL_LOCKED, val,
173 (val != 0), 1, 500)) {
174 dev_err(&phy->dev, "PLL Locked: 0x%x\n", val);
175 return -ETIMEDOUT;
176 }
177
178 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
179 val |= PCIE_PHY_COMMON_PD_CMN;
180 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);
181
182 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
183 val |= PCIE_PHY_TRSV0_PD_TSV;
184 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);
185
186 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
187 val |= PCIE_PHY_TRSV1_PD_TSV;
188 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);
189
190 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
191 val |= PCIE_PHY_TRSV2_PD_TSV;
192 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);
193
194 val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
195 val |= PCIE_PHY_TRSV3_PD_TSV;
196 exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);
197
198 return 0;
199}
200
201static int exynos5440_pcie_phy_reset(struct phy *phy)
202{
203 struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
204
205 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_MAC_RESET);
206 exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_GLOBAL_RESET);
207 exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_GLOBAL_RESET);
208
209 return 0;
210}
211
212static const struct phy_ops exynos5440_phy_ops = {
213 .init = exynos5440_pcie_phy_init,
214 .power_on = exynos5440_pcie_phy_power_on,
215 .power_off = exynos5440_pcie_phy_power_off,
216 .reset = exynos5440_pcie_phy_reset,
217 .owner = THIS_MODULE,
218};
219
220static const struct exynos_pcie_phy_data exynos5440_pcie_phy_data = {
221 .ops = &exynos5440_phy_ops,
222};
223
224static const struct of_device_id exynos_pcie_phy_match[] = {
225 {
226 .compatible = "samsung,exynos5440-pcie-phy",
227 .data = &exynos5440_pcie_phy_data,
228 },
229 {},
230};
231MODULE_DEVICE_TABLE(of, exynos_pcie_phy_match);
232
233static int exynos_pcie_phy_probe(struct platform_device *pdev)
234{
235 struct device *dev = &pdev->dev;
236 struct exynos_pcie_phy *exynos_phy;
237 struct phy *generic_phy;
238 struct phy_provider *phy_provider;
239 struct resource *res;
240 const struct exynos_pcie_phy_data *drv_data;
241
242 drv_data = of_device_get_match_data(dev);
243 if (!drv_data)
244 return -ENODEV;
245
246 exynos_phy = devm_kzalloc(dev, sizeof(*exynos_phy), GFP_KERNEL);
247 if (!exynos_phy)
248 return -ENOMEM;
249
250 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
251 exynos_phy->phy_base = devm_ioremap_resource(dev, res);
252 if (IS_ERR(exynos_phy->phy_base))
253 return PTR_ERR(exynos_phy->phy_base);
254
255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
256 exynos_phy->blk_base = devm_ioremap_resource(dev, res);
257 if (IS_ERR(exynos_phy->phy_base))
258 return PTR_ERR(exynos_phy->phy_base);
259
260 exynos_phy->drv_data = drv_data;
261
262 generic_phy = devm_phy_create(dev, dev->of_node, drv_data->ops);
263 if (IS_ERR(generic_phy)) {
264 dev_err(dev, "failed to create PHY\n");
265 return PTR_ERR(generic_phy);
266 }
267
268 phy_set_drvdata(generic_phy, exynos_phy);
269 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
270
271 return PTR_ERR_OR_ZERO(phy_provider);
272}
273
274static struct platform_driver exynos_pcie_phy_driver = {
275 .probe = exynos_pcie_phy_probe,
276 .driver = {
277 .of_match_table = exynos_pcie_phy_match,
278 .name = "exynos_pcie_phy",
279 }
280};
281module_platform_driver(exynos_pcie_phy_driver);
282
283MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC PCIe PHY driver");
284MODULE_AUTHOR("Jaehoon Chung <jh80.chung@samsung.com>");
285MODULE_LICENSE("GPL v2");
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0db320b7bb15..1b6f3ebbe876 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -316,12 +316,6 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
316struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, 316struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
317 struct msi_domain_info *info, 317 struct msi_domain_info *info,
318 struct irq_domain *parent); 318 struct irq_domain *parent);
319int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
320 int nvec, int type);
321void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
322struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
323 struct msi_domain_info *info, struct irq_domain *parent);
324
325irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, 319irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
326 struct msi_desc *desc); 320 struct msi_desc *desc);
327int pci_msi_domain_check_cap(struct irq_domain *domain, 321int pci_msi_domain_check_cap(struct irq_domain *domain,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e2d1a124216a..6732d327c2d6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -678,9 +678,6 @@ struct pci_error_handlers {
678 /* MMIO has been re-enabled, but not DMA */ 678 /* MMIO has been re-enabled, but not DMA */
679 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); 679 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
680 680
681 /* PCI Express link has been reset */
682 pci_ers_result_t (*link_reset)(struct pci_dev *dev);
683
684 /* PCI slot has been reset */ 681 /* PCI slot has been reset */
685 pci_ers_result_t (*slot_reset)(struct pci_dev *dev); 682 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
686 683
@@ -1309,14 +1306,7 @@ void pci_msix_shutdown(struct pci_dev *dev);
1309void pci_disable_msix(struct pci_dev *dev); 1306void pci_disable_msix(struct pci_dev *dev);
1310void pci_restore_msi_state(struct pci_dev *dev); 1307void pci_restore_msi_state(struct pci_dev *dev);
1311int pci_msi_enabled(void); 1308int pci_msi_enabled(void);
1312int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); 1309int pci_enable_msi(struct pci_dev *dev);
1313static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1314{
1315 int rc = pci_enable_msi_range(dev, nvec, nvec);
1316 if (rc < 0)
1317 return rc;
1318 return 0;
1319}
1320int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1310int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1321 int minvec, int maxvec); 1311 int minvec, int maxvec);
1322static inline int pci_enable_msix_exact(struct pci_dev *dev, 1312static inline int pci_enable_msix_exact(struct pci_dev *dev,
@@ -1347,10 +1337,7 @@ static inline void pci_msix_shutdown(struct pci_dev *dev) { }
1347static inline void pci_disable_msix(struct pci_dev *dev) { } 1337static inline void pci_disable_msix(struct pci_dev *dev) { }
1348static inline void pci_restore_msi_state(struct pci_dev *dev) { } 1338static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1349static inline int pci_msi_enabled(void) { return 0; } 1339static inline int pci_msi_enabled(void) { return 0; }
1350static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, 1340static inline int pci_enable_msi(struct pci_dev *dev)
1351 int maxvec)
1352{ return -ENOSYS; }
1353static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1354{ return -ENOSYS; } 1341{ return -ENOSYS; }
1355static inline int pci_enable_msix_range(struct pci_dev *dev, 1342static inline int pci_enable_msix_range(struct pci_dev *dev,
1356 struct msix_entry *entries, int minvec, int maxvec) 1343 struct msix_entry *entries, int minvec, int maxvec)
@@ -1426,8 +1413,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
1426static inline void pcie_ecrc_get_policy(char *str) { } 1413static inline void pcie_ecrc_get_policy(char *str) { }
1427#endif 1414#endif
1428 1415
1429#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1)
1430
1431#ifdef CONFIG_HT_IRQ 1416#ifdef CONFIG_HT_IRQ
1432/* The functions a driver should call */ 1417/* The functions a driver should call */
1433int ht_create_irq(struct pci_dev *dev, int idx); 1418int ht_create_irq(struct pci_dev *dev, int idx);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 73dda0edcb97..a4f77feecbb0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2516,6 +2516,8 @@
2516#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 2516#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
2517#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff 2517#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
2518 2518
2519#define PCI_VENDOR_ID_HUAWEI 0x19e5
2520
2519#define PCI_VENDOR_ID_NETRONOME 0x19ee 2521#define PCI_VENDOR_ID_NETRONOME 0x19ee
2520#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200 2522#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
2521#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240 2523#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 174d1147081b..634c9c44ed6c 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -682,6 +682,7 @@
682#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */ 682#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
683#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ 683#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
684#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ 684#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
685#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
685#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ 686#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
686#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM 687#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
687 688
@@ -973,6 +974,7 @@
973#define PCI_EXP_DPC_STATUS 8 /* DPC Status */ 974#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
974#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ 975#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */
975#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ 976#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */
977#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */
976 978
977#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ 979#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
978 980
@@ -985,4 +987,19 @@
985#define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */ 987#define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */
986#define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */ 988#define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */
987 989
990/* L1 PM Substates */
991#define PCI_L1SS_CAP 4 /* capability register */
992#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 Support */
993#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
994#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */
995#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */
996#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */
997#define PCI_L1SS_CTL1 8 /* Control Register 1 */
998#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */
999#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
1000#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */
1001#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */
1002#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F
1003#define PCI_L1SS_CTL2 0xC /* Control Register 2 */
1004
988#endif /* LINUX_PCI_REGS_H */ 1005#endif /* LINUX_PCI_REGS_H */