aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci10
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt25
-rw-r--r--Documentation/devicetree/bindings/pci/pci-keystone.txt63
-rw-r--r--Documentation/devicetree/bindings/pci/xgene-pci.txt57
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-pcie.txt62
-rw-r--r--Documentation/driver-model/devres.txt2
-rw-r--r--MAINTAINERS15
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi18
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi6
-rw-r--r--arch/arm/include/asm/io.h1
-rw-r--r--arch/arm/mach-integrator/pci_v3.c23
-rw-r--r--arch/arm64/Kconfig22
-rw-r--r--arch/arm64/boot/dts/apm-mustang.dts8
-rw-r--r--arch/arm64/boot/dts/apm-storm.dtsi165
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/io.h3
-rw-r--r--arch/arm64/include/asm/pci.h37
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/kernel/Makefile1
-rw-r--r--arch/arm64/kernel/pci.c70
-rw-r--r--arch/ia64/kernel/msi_ia64.c2
-rw-r--r--arch/ia64/sn/kernel/msi_sn.c4
-rw-r--r--arch/mips/pci/msi-octeon.c6
-rw-r--r--arch/powerpc/include/asm/machdep.h2
-rw-r--r--arch/powerpc/kernel/msi.c12
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c9
-rw-r--r--arch/powerpc/platforms/powernv/pci.c19
-rw-r--r--arch/powerpc/platforms/pseries/msi.c44
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c12
-rw-r--r--arch/powerpc/sysdev/mpic_pasemi_msi.c11
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c28
-rw-r--r--arch/powerpc/sysdev/ppc4xx_hsta_msi.c18
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c19
-rw-r--r--arch/x86/pci/common.c20
-rw-r--r--arch/x86/pci/mmconfig-shared.c40
-rw-r--r--arch/x86/pci/pcbios.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c14
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h1
-rw-r--r--drivers/of/address.c154
-rw-r--r--drivers/of/of_pci.c142
-rw-r--r--drivers/pci/host/Kconfig28
-rw-r--r--drivers/pci/host/Makefile3
-rw-r--r--drivers/pci/host/pci-imx6.c13
-rw-r--r--drivers/pci/host/pci-keystone-dw.c516
-rw-r--r--drivers/pci/host/pci-keystone.c415
-rw-r--r--drivers/pci/host/pci-keystone.h58
-rw-r--r--drivers/pci/host/pci-mvebu.c6
-rw-r--r--drivers/pci/host/pci-tegra.c277
-rw-r--r--drivers/pci/host/pci-xgene.c659
-rw-r--r--drivers/pci/host/pcie-designware.c268
-rw-r--r--drivers/pci/host/pcie-designware.h22
-rw-r--r--drivers/pci/host/pcie-rcar.c21
-rw-r--r--drivers/pci/host/pcie-spear13xx.c2
-rw-r--r--drivers/pci/host/pcie-xilinx.c970
-rw-r--r--drivers/pci/hotplug/Makefile2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c254
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c11
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c13
-rw-r--r--drivers/pci/hotplug/cpcihp_generic.c28
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c44
-rw-r--r--drivers/pci/hotplug/cpqphp.h2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c3
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c19
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c13
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c19
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c3
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c3
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c6
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c45
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c7
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c17
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c9
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c176
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c14
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c5
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c8
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/msi.c75
-rw-r--r--drivers/pci/pci-acpi.c276
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci-sysfs.c41
-rw-r--r--drivers/pci/pci.c57
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c11
-rw-r--r--drivers/pci/pcie/portdrv_pci.c74
-rw-r--r--drivers/pci/probe.c167
-rw-r--r--drivers/pci/quirks.c119
-rw-r--r--drivers/pci/search.c34
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.h1
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c4
-rw-r--r--include/asm-generic/io.h2
-rw-r--r--include/asm-generic/pgtable.h4
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/ioport.h5
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/of_address.h27
-rw-r--r--include/linux/of_pci.h13
-rw-r--r--include/linux/pci.h60
-rw-r--r--include/linux/pci_hotplug.h2
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/ras/ras_event.h48
-rw-r--r--include/uapi/linux/pci_regs.h3
-rw-r--r--kernel/resource.c70
-rw-r--r--virt/kvm/assigned-dev.c2
-rw-r--r--virt/kvm/iommu.c4
111 files changed, 4911 insertions, 1333 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 6615fda0abfb..ee6c04036492 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -65,6 +65,16 @@ Description:
65 force a rescan of all PCI buses in the system, and 65 force a rescan of all PCI buses in the system, and
66 re-discover previously removed devices. 66 re-discover previously removed devices.
67 67
68What: /sys/bus/pci/devices/.../msi_bus
69Date: September 2014
70Contact: Linux PCI developers <linux-pci@vger.kernel.org>
71Description:
72 Writing a zero value to this attribute disallows MSI and
73 MSI-X for any future drivers of the device. If the device
74 is a bridge, MSI and MSI-X will be disallowed for future
75 drivers of all child devices under the bridge. Drivers
76 must be reloaded for the new setting to take effect.
77
68What: /sys/bus/pci/devices/.../msi_irqs/ 78What: /sys/bus/pci/devices/.../msi_irqs/
69Date: September, 2011 79Date: September, 2011
70Contact: Neil Horman <nhorman@tuxdriver.com> 80Contact: Neil Horman <nhorman@tuxdriver.com>
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index ed0d9b9fff2b..9f4faa8e8d00 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -23,3 +23,6 @@ Required properties:
23 23
24Optional properties: 24Optional properties:
25- reset-gpio: gpio pin number of power good signal 25- reset-gpio: gpio pin number of power good signal
26- bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
27 specify this property, to keep backwards compatibility a range of 0x00-0xff
28 is assumed if not present)
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index 0823362548dc..d763e047c6ae 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -1,7 +1,10 @@
1NVIDIA Tegra PCIe controller 1NVIDIA Tegra PCIe controller
2 2
3Required properties: 3Required properties:
4- compatible: "nvidia,tegra20-pcie" or "nvidia,tegra30-pcie" 4- compatible: Must be one of:
5 - "nvidia,tegra20-pcie"
6 - "nvidia,tegra30-pcie"
7 - "nvidia,tegra124-pcie"
5- device_type: Must be "pci" 8- device_type: Must be "pci"
6- reg: A list of physical base address and length for each set of controller 9- reg: A list of physical base address and length for each set of controller
7 registers. Must contain an entry for each entry in the reg-names property. 10 registers. Must contain an entry for each entry in the reg-names property.
@@ -57,6 +60,11 @@ Required properties:
57 - afi 60 - afi
58 - pcie_x 61 - pcie_x
59 62
63Required properties on Tegra124 and later:
64- phys: Must contain an entry for each entry in phy-names.
65- phy-names: Must include the following entries:
66 - pcie
67
60Power supplies for Tegra20: 68Power supplies for Tegra20:
61- avdd-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V. 69- avdd-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
62- vdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V. 70- vdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
@@ -84,6 +92,21 @@ Power supplies for Tegra30:
84 - avdd-pexb-supply: Power supply for analog PCIe logic. Must supply 1.05 V. 92 - avdd-pexb-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
85 - vdd-pexb-supply: Power supply for digital PCIe I/O. Must supply 1.05 V. 93 - vdd-pexb-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
86 94
95Power supplies for Tegra124:
96- Required:
97 - avddio-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
98 - dvddio-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
99 - avdd-pex-pll-supply: Power supply for dedicated (internal) PCIe PLL. Must
100 supply 1.05 V.
101 - hvdd-pex-supply: High-voltage supply for PCIe I/O and PCIe output clocks.
102 Must supply 3.3 V.
103 - hvdd-pex-pll-e-supply: High-voltage supply for PLLE (shared with USB3).
104 Must supply 3.3 V.
105 - vddio-pex-ctl-supply: Power supply for PCIe control I/O partition. Must
106 supply 2.8-3.3 V.
107 - avdd-pll-erefe-supply: Power supply for PLLE (shared with USB3). Must
108 supply 1.05 V.
109
87Root ports are defined as subnodes of the PCIe controller node. 110Root ports are defined as subnodes of the PCIe controller node.
88 111
89Required properties: 112Required properties:
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
new file mode 100644
index 000000000000..54eae2938174
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -0,0 +1,63 @@
1TI Keystone PCIe interface
2
3Keystone PCI host Controller is based on Designware PCI h/w version 3.65.
 4It shares common functions with PCIe Designware core driver and inherits
5common properties defined in
 6Documentation/devicetree/bindings/pci/designware-pcie.txt
7
 8Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt
9for the details of Designware DT bindings. Additional properties are
10described here as well as properties that are not applicable.
11
12Required Properties:-
13
14compatible: "ti,keystone-pcie"
15reg: index 1 is the base address and length of DW application registers.
16 index 2 is the base address and length of PCI device ID register.
17
18pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
19 interrupt-cells: should be set to 1
20 interrupt-parent: Parent interrupt controller phandle
21 interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
22
23 Example:
24 pcie_msi_intc: msi-interrupt-controller {
25 interrupt-controller;
26 #interrupt-cells = <1>;
27 interrupt-parent = <&gic>;
28 interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
29 <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
30 <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>,
31 <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>,
32 <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
33 <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
34 <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
35 <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>;
36 };
37
38pcie_intc: Interrupt controller device node for Legacy IRQ chip
39 interrupt-cells: should be set to 1
40 interrupt-parent: Parent interrupt controller phandle
41 interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines
42
43 Example:
44 pcie_intc: legacy-interrupt-controller {
45 interrupt-controller;
46 #interrupt-cells = <1>;
47 interrupt-parent = <&gic>;
48 interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
49 <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
50 <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
51 <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>;
52 };
53
54Optional properties:-
55 phys: phandle to Generic Keystone SerDes phy for PCI
56	phy-names: name of the Generic Keystone SerDes phy for PCI
57 - If boot loader already does PCI link establishment, then phys and
58 phy-names shouldn't be present.
59
60Designware DT Properties not applicable for Keystone PCI
61
621. pcie_bus clock-names not used. Instead, a phandle to phys is used.
63
diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt
new file mode 100644
index 000000000000..1070b068c7c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xgene-pci.txt
@@ -0,0 +1,57 @@
1* AppliedMicro X-Gene PCIe interface
2
3Required properties:
4- device_type: set to "pci"
5- compatible: should contain "apm,xgene-pcie" to identify the core.
6- reg: A list of physical base address and length for each set of controller
7 registers. Must contain an entry for each entry in the reg-names
8 property.
9- reg-names: Must include the following entries:
10 "csr": controller configuration registers.
11 "cfg": pcie configuration space registers.
12- #address-cells: set to <3>
13- #size-cells: set to <2>
14- ranges: ranges for the outbound memory, I/O regions.
15- dma-ranges: ranges for the inbound memory regions.
16- #interrupt-cells: set to <1>
17- interrupt-map-mask and interrupt-map: standard PCI properties
18 to define the mapping of the PCIe interface to interrupt
19 numbers.
20- clocks: from common clock binding: handle to pci clock.
21
22Optional properties:
23- status: Either "ok" or "disabled".
24- dma-coherent: Present if dma operations are coherent
25
26Example:
27
28SoC specific DT Entry:
29
30 pcie0: pcie@1f2b0000 {
31 status = "disabled";
32 device_type = "pci";
33 compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
34 #interrupt-cells = <1>;
35 #size-cells = <2>;
36 #address-cells = <3>;
37 reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */
38 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
39 reg-names = "csr", "cfg";
40 ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
41 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
42 dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
43 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
44 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
45 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
46 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
47 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
48 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
49 dma-coherent;
50 clocks = <&pcie0clk 0>;
51 };
52
53
54Board specific DT Entry:
55 &pcie0 {
56 status = "ok";
57 };
diff --git a/Documentation/devicetree/bindings/pci/xilinx-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-pcie.txt
new file mode 100644
index 000000000000..3e2c88d97ad4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xilinx-pcie.txt
@@ -0,0 +1,62 @@
1* Xilinx AXI PCIe Root Port Bridge DT description
2
3Required properties:
4- #address-cells: Address representation for root ports, set to <3>
5- #size-cells: Size representation for root ports, set to <2>
6- #interrupt-cells: specifies the number of cells needed to encode an
7 interrupt source. The value must be 1.
8- compatible: Should contain "xlnx,axi-pcie-host-1.00.a"
9- reg: Should contain AXI PCIe registers location and length
10- device_type: must be "pci"
11- interrupts: Should contain AXI PCIe interrupt
12- interrupt-map-mask,
13 interrupt-map: standard PCI properties to define the mapping of the
14 PCI interface to interrupt numbers.
15- ranges: ranges for the PCI memory regions (I/O space region is not
16 supported by hardware)
17 Please refer to the standard PCI bus binding document for a more
18 detailed explanation
19
20Optional properties:
21- bus-range: PCI bus numbers covered
22
23Interrupt controller child node
24+++++++++++++++++++++++++++++++
25Required properties:
26- interrupt-controller: identifies the node as an interrupt controller
27- #address-cells: specifies the number of cells needed to encode an
28 address. The value must be 0.
29- #interrupt-cells: specifies the number of cells needed to encode an
30 interrupt source. The value must be 1.
31
32NOTE:
33The core provides a single interrupt for both INTx/MSI messages. So,
34created an interrupt controller node to support 'interrupt-map' DT
35functionality. The driver will create an IRQ domain for this map, decode
36the four INTx interrupts in ISR and route them to this domain.
37
38
39Example:
40++++++++
41
42 pci_express: axi-pcie@50000000 {
43 #address-cells = <3>;
44 #size-cells = <2>;
45 #interrupt-cells = <1>;
46 compatible = "xlnx,axi-pcie-host-1.00.a";
47 reg = < 0x50000000 0x10000000 >;
48 device_type = "pci";
49 interrupts = < 0 52 4 >;
50 interrupt-map-mask = <0 0 0 7>;
51 interrupt-map = <0 0 0 1 &pcie_intc 1>,
52 <0 0 0 2 &pcie_intc 2>,
53 <0 0 0 3 &pcie_intc 3>,
54 <0 0 0 4 &pcie_intc 4>;
55 ranges = < 0x02000000 0 0x60000000 0x60000000 0 0x10000000 >;
56
57 pcie_intc: interrupt-controller {
58 interrupt-controller;
59 #address-cells = <0>;
60 #interrupt-cells = <1>;
61 }
62 };
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 40677443c0c5..b5ab416cd53a 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -264,8 +264,10 @@ IIO
264IO region 264IO region
265 devm_release_mem_region() 265 devm_release_mem_region()
266 devm_release_region() 266 devm_release_region()
267 devm_release_resource()
267 devm_request_mem_region() 268 devm_request_mem_region()
268 devm_request_region() 269 devm_request_region()
270 devm_request_resource()
269 271
270IOMAP 272IOMAP
271 devm_ioport_map() 273 devm_ioport_map()
diff --git a/MAINTAINERS b/MAINTAINERS
index 0b23084070c2..75b98b4958c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6939,6 +6939,14 @@ F: include/linux/pci*
6939F: arch/x86/pci/ 6939F: arch/x86/pci/
6940F: arch/x86/kernel/quirks.c 6940F: arch/x86/kernel/quirks.c
6941 6941
6942PCI DRIVER FOR APPLIEDMICRO XGENE
6943M: Tanmay Inamdar <tinamdar@apm.com>
6944L: linux-pci@vger.kernel.org
6945L: linux-arm-kernel@lists.infradead.org
6946S: Maintained
6947F: Documentation/devicetree/bindings/pci/xgene-pci.txt
6948F: drivers/pci/host/pci-xgene.c
6949
6942PCI DRIVER FOR IMX6 6950PCI DRIVER FOR IMX6
6943M: Richard Zhu <r65037@freescale.com> 6951M: Richard Zhu <r65037@freescale.com>
6944M: Lucas Stach <l.stach@pengutronix.de> 6952M: Lucas Stach <l.stach@pengutronix.de>
@@ -6947,6 +6955,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6947S: Maintained 6955S: Maintained
6948F: drivers/pci/host/*imx6* 6956F: drivers/pci/host/*imx6*
6949 6957
6958PCI DRIVER FOR TI KEYSTONE
6959M: Murali Karicheri <m-karicheri2@ti.com>
6960L: linux-pci@vger.kernel.org
6961L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6962S: Maintained
6963F: drivers/pci/host/*keystone*
6964
6950PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) 6965PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
6951M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 6966M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6952M: Jason Cooper <jason@lakedaemon.net> 6967M: Jason Cooper <jason@lakedaemon.net>
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index fa5f2bb5f106..9d342920695a 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -85,7 +85,8 @@
85 85
86 pcie0: pcie@b1000000 { 86 pcie0: pcie@b1000000 {
87 compatible = "st,spear1340-pcie", "snps,dw-pcie"; 87 compatible = "st,spear1340-pcie", "snps,dw-pcie";
88 reg = <0xb1000000 0x4000>; 88 reg = <0xb1000000 0x4000>, <0x80000000 0x20000>;
89 reg-names = "dbi", "config";
89 interrupts = <0 68 0x4>; 90 interrupts = <0 68 0x4>;
90 interrupt-map-mask = <0 0 0 0>; 91 interrupt-map-mask = <0 0 0 0>;
91 interrupt-map = <0x0 0 &gic 0 68 0x4>; 92 interrupt-map = <0x0 0 &gic 0 68 0x4>;
@@ -95,15 +96,15 @@
95 #address-cells = <3>; 96 #address-cells = <3>;
96 #size-cells = <2>; 97 #size-cells = <2>;
97 device_type = "pci"; 98 device_type = "pci";
98 ranges = <0x00000800 0 0x80000000 0x80000000 0 0x00020000 /* configuration space */ 99 ranges = <0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
99 0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
100 0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */ 100 0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
101 status = "disabled"; 101 status = "disabled";
102 }; 102 };
103 103
104 pcie1: pcie@b1800000 { 104 pcie1: pcie@b1800000 {
105 compatible = "st,spear1340-pcie", "snps,dw-pcie"; 105 compatible = "st,spear1340-pcie", "snps,dw-pcie";
106 reg = <0xb1800000 0x4000>; 106 reg = <0xb1800000 0x4000>, <0x90000000 0x20000>;
107 reg-names = "dbi", "config";
107 interrupts = <0 69 0x4>; 108 interrupts = <0 69 0x4>;
108 interrupt-map-mask = <0 0 0 0>; 109 interrupt-map-mask = <0 0 0 0>;
109 interrupt-map = <0x0 0 &gic 0 69 0x4>; 110 interrupt-map = <0x0 0 &gic 0 69 0x4>;
@@ -113,15 +114,15 @@
113 #address-cells = <3>; 114 #address-cells = <3>;
114 #size-cells = <2>; 115 #size-cells = <2>;
115 device_type = "pci"; 116 device_type = "pci";
116 ranges = <0x00000800 0 0x90000000 0x90000000 0 0x00020000 /* configuration space */ 117 ranges = <0x81000000 0 0 0x90020000 0 0x00010000 /* downstream I/O */
117 0x81000000 0 0 0x90020000 0 0x00010000 /* downstream I/O */
118 0x82000000 0 0x90030000 0x90030000 0 0x0ffd0000>; /* non-prefetchable memory */ 118 0x82000000 0 0x90030000 0x90030000 0 0x0ffd0000>; /* non-prefetchable memory */
119 status = "disabled"; 119 status = "disabled";
120 }; 120 };
121 121
122 pcie2: pcie@b4000000 { 122 pcie2: pcie@b4000000 {
123 compatible = "st,spear1340-pcie", "snps,dw-pcie"; 123 compatible = "st,spear1340-pcie", "snps,dw-pcie";
124 reg = <0xb4000000 0x4000>; 124 reg = <0xb4000000 0x4000>, <0xc0000000 0x20000>;
125 reg-names = "dbi", "config";
125 interrupts = <0 70 0x4>; 126 interrupts = <0 70 0x4>;
126 interrupt-map-mask = <0 0 0 0>; 127 interrupt-map-mask = <0 0 0 0>;
127 interrupt-map = <0x0 0 &gic 0 70 0x4>; 128 interrupt-map = <0x0 0 &gic 0 70 0x4>;
@@ -131,8 +132,7 @@
131 #address-cells = <3>; 132 #address-cells = <3>;
132 #size-cells = <2>; 133 #size-cells = <2>;
133 device_type = "pci"; 134 device_type = "pci";
134 ranges = <0x00000800 0 0xc0000000 0xc0000000 0 0x00020000 /* configuration space */ 135 ranges = <0x81000000 0 0 0xc0020000 0 0x00010000 /* downstream I/O */
135 0x81000000 0 0 0xc0020000 0 0x00010000 /* downstream I/O */
136 0x82000000 0 0xc0030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */ 136 0x82000000 0 0xc0030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
137 status = "disabled"; 137 status = "disabled";
138 }; 138 };
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index e71df0f2cb52..13e1aa33daa2 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -50,7 +50,8 @@
50 50
51 pcie0: pcie@b1000000 { 51 pcie0: pcie@b1000000 {
52 compatible = "st,spear1340-pcie", "snps,dw-pcie"; 52 compatible = "st,spear1340-pcie", "snps,dw-pcie";
53 reg = <0xb1000000 0x4000>; 53 reg = <0xb1000000 0x4000>, <0x80000000 0x20000>;
54 reg-names = "dbi", "config";
54 interrupts = <0 68 0x4>; 55 interrupts = <0 68 0x4>;
55 interrupt-map-mask = <0 0 0 0>; 56 interrupt-map-mask = <0 0 0 0>;
56 interrupt-map = <0x0 0 &gic 0 68 0x4>; 57 interrupt-map = <0x0 0 &gic 0 68 0x4>;
@@ -60,8 +61,7 @@
60 #address-cells = <3>; 61 #address-cells = <3>;
61 #size-cells = <2>; 62 #size-cells = <2>;
62 device_type = "pci"; 63 device_type = "pci";
63 ranges = <0x00000800 0 0x80000000 0x80000000 0 0x00020000 /* configuration space */ 64 ranges = <0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
64 0x81000000 0 0 0x80020000 0 0x00010000 /* downstream I/O */
65 0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */ 65 0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
66 status = "disabled"; 66 status = "disabled";
67 }; 67 };
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3d23418cbddd..180567408ee8 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -178,6 +178,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
178 178
179/* PCI fixed i/o mapping */ 179/* PCI fixed i/o mapping */
180#define PCI_IO_VIRT_BASE 0xfee00000 180#define PCI_IO_VIRT_BASE 0xfee00000
181#define PCI_IOBASE ((void __iomem *)PCI_IO_VIRT_BASE)
181 182
182#if defined(CONFIG_PCI) 183#if defined(CONFIG_PCI)
183void pci_ioremap_set_mem_type(int mem_type); 184void pci_ioremap_set_mem_type(int mem_type);
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 05e1f73a1e8d..c186a17c2cff 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -660,6 +660,7 @@ static void __init pci_v3_preinit(void)
660{ 660{
661 unsigned long flags; 661 unsigned long flags;
662 unsigned int temp; 662 unsigned int temp;
663 phys_addr_t io_address = pci_pio_to_address(io_mem.start);
663 664
664 pcibios_min_mem = 0x00100000; 665 pcibios_min_mem = 0x00100000;
665 666
@@ -701,7 +702,7 @@ static void __init pci_v3_preinit(void)
701 /* 702 /*
702 * Setup window 2 - PCI IO 703 * Setup window 2 - PCI IO
703 */ 704 */
704 v3_writel(V3_LB_BASE2, v3_addr_to_lb_base2(io_mem.start) | 705 v3_writel(V3_LB_BASE2, v3_addr_to_lb_base2(io_address) |
705 V3_LB_BASE_ENABLE); 706 V3_LB_BASE_ENABLE);
706 v3_writew(V3_LB_MAP2, v3_addr_to_lb_map2(0)); 707 v3_writew(V3_LB_MAP2, v3_addr_to_lb_map2(0));
707 708
@@ -742,6 +743,7 @@ static void __init pci_v3_preinit(void)
742static void __init pci_v3_postinit(void) 743static void __init pci_v3_postinit(void)
743{ 744{
744 unsigned int pci_cmd; 745 unsigned int pci_cmd;
746 phys_addr_t io_address = pci_pio_to_address(io_mem.start);
745 747
746 pci_cmd = PCI_COMMAND_MEMORY | 748 pci_cmd = PCI_COMMAND_MEMORY |
747 PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; 749 PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
@@ -758,7 +760,7 @@ static void __init pci_v3_postinit(void)
758 "interrupt: %d\n", ret); 760 "interrupt: %d\n", ret);
759#endif 761#endif
760 762
761 register_isa_ports(non_mem.start, io_mem.start, 0); 763 register_isa_ports(non_mem.start, io_address, 0);
762} 764}
763 765
764/* 766/*
@@ -867,33 +869,32 @@ static int __init pci_v3_probe(struct platform_device *pdev)
867 869
868 for_each_of_pci_range(&parser, &range) { 870 for_each_of_pci_range(&parser, &range) {
869 if (!range.flags) { 871 if (!range.flags) {
870 of_pci_range_to_resource(&range, np, &conf_mem); 872 ret = of_pci_range_to_resource(&range, np, &conf_mem);
871 conf_mem.name = "PCIv3 config"; 873 conf_mem.name = "PCIv3 config";
872 } 874 }
873 if (range.flags & IORESOURCE_IO) { 875 if (range.flags & IORESOURCE_IO) {
874 of_pci_range_to_resource(&range, np, &io_mem); 876 ret = of_pci_range_to_resource(&range, np, &io_mem);
875 io_mem.name = "PCIv3 I/O"; 877 io_mem.name = "PCIv3 I/O";
876 } 878 }
877 if ((range.flags & IORESOURCE_MEM) && 879 if ((range.flags & IORESOURCE_MEM) &&
878 !(range.flags & IORESOURCE_PREFETCH)) { 880 !(range.flags & IORESOURCE_PREFETCH)) {
879 non_mem_pci = range.pci_addr; 881 non_mem_pci = range.pci_addr;
880 non_mem_pci_sz = range.size; 882 non_mem_pci_sz = range.size;
881 of_pci_range_to_resource(&range, np, &non_mem); 883 ret = of_pci_range_to_resource(&range, np, &non_mem);
882 non_mem.name = "PCIv3 non-prefetched mem"; 884 non_mem.name = "PCIv3 non-prefetched mem";
883 } 885 }
884 if ((range.flags & IORESOURCE_MEM) && 886 if ((range.flags & IORESOURCE_MEM) &&
885 (range.flags & IORESOURCE_PREFETCH)) { 887 (range.flags & IORESOURCE_PREFETCH)) {
886 pre_mem_pci = range.pci_addr; 888 pre_mem_pci = range.pci_addr;
887 pre_mem_pci_sz = range.size; 889 pre_mem_pci_sz = range.size;
888 of_pci_range_to_resource(&range, np, &pre_mem); 890 ret = of_pci_range_to_resource(&range, np, &pre_mem);
889 pre_mem.name = "PCIv3 prefetched mem"; 891 pre_mem.name = "PCIv3 prefetched mem";
890 } 892 }
891 }
892 893
893 if (!conf_mem.start || !io_mem.start || 894 if (ret < 0) {
894 !non_mem.start || !pre_mem.start) { 895 dev_err(&pdev->dev, "missing ranges in device node\n");
895 dev_err(&pdev->dev, "missing ranges in device node\n"); 896 return ret;
896 return -EINVAL; 897 }
897 } 898 }
898 899
899 pci_v3.map_irq = of_irq_parse_and_map_pci; 900 pci_v3.map_irq = of_irq_parse_and_map_pci;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9746dc24a117..3f0e854d0ff4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -83,7 +83,7 @@ config MMU
83 def_bool y 83 def_bool y
84 84
85config NO_IOPORT_MAP 85config NO_IOPORT_MAP
86 def_bool y 86 def_bool y if !PCI
87 87
88config STACKTRACE_SUPPORT 88config STACKTRACE_SUPPORT
89 def_bool y 89 def_bool y
@@ -163,6 +163,26 @@ menu "Bus support"
163config ARM_AMBA 163config ARM_AMBA
164 bool 164 bool
165 165
166config PCI
167 bool "PCI support"
168 help
169 This feature enables support for PCI bus system. If you say Y
170 here, the kernel will include drivers and infrastructure code
171 to support PCI bus devices.
172
173config PCI_DOMAINS
174 def_bool PCI
175
176config PCI_DOMAINS_GENERIC
177 def_bool PCI
178
179config PCI_SYSCALL
180 def_bool PCI
181
182source "drivers/pci/Kconfig"
183source "drivers/pci/pcie/Kconfig"
184source "drivers/pci/hotplug/Kconfig"
185
166endmenu 186endmenu
167 187
168menu "Kernel Features" 188menu "Kernel Features"
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index b2f56229aa5e..f64900052f4e 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -25,6 +25,14 @@
25 }; 25 };
26}; 26};
27 27
28&pcie0clk {
29 status = "ok";
30};
31
32&pcie0 {
33 status = "ok";
34};
35
28&serial0 { 36&serial0 {
29 status = "ok"; 37 status = "ok";
30}; 38};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index f391972ad135..4f6d04d52cca 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -282,6 +282,171 @@
282 enable-mask = <0x10>; 282 enable-mask = <0x10>;
283 clock-output-names = "rngpkaclk"; 283 clock-output-names = "rngpkaclk";
284 }; 284 };
285
286 pcie0clk: pcie0clk@1f2bc000 {
287 status = "disabled";
288 compatible = "apm,xgene-device-clock";
289 #clock-cells = <1>;
290 clocks = <&socplldiv2 0>;
291 reg = <0x0 0x1f2bc000 0x0 0x1000>;
292 reg-names = "csr-reg";
293 clock-output-names = "pcie0clk";
294 };
295
296 pcie1clk: pcie1clk@1f2cc000 {
297 status = "disabled";
298 compatible = "apm,xgene-device-clock";
299 #clock-cells = <1>;
300 clocks = <&socplldiv2 0>;
301 reg = <0x0 0x1f2cc000 0x0 0x1000>;
302 reg-names = "csr-reg";
303 clock-output-names = "pcie1clk";
304 };
305
306 pcie2clk: pcie2clk@1f2dc000 {
307 status = "disabled";
308 compatible = "apm,xgene-device-clock";
309 #clock-cells = <1>;
310 clocks = <&socplldiv2 0>;
311 reg = <0x0 0x1f2dc000 0x0 0x1000>;
312 reg-names = "csr-reg";
313 clock-output-names = "pcie2clk";
314 };
315
316 pcie3clk: pcie3clk@1f50c000 {
317 status = "disabled";
318 compatible = "apm,xgene-device-clock";
319 #clock-cells = <1>;
320 clocks = <&socplldiv2 0>;
321 reg = <0x0 0x1f50c000 0x0 0x1000>;
322 reg-names = "csr-reg";
323 clock-output-names = "pcie3clk";
324 };
325
326 pcie4clk: pcie4clk@1f51c000 {
327 status = "disabled";
328 compatible = "apm,xgene-device-clock";
329 #clock-cells = <1>;
330 clocks = <&socplldiv2 0>;
331 reg = <0x0 0x1f51c000 0x0 0x1000>;
332 reg-names = "csr-reg";
333 clock-output-names = "pcie4clk";
334 };
335 };
336
337 pcie0: pcie@1f2b0000 {
338 status = "disabled";
339 device_type = "pci";
340 compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
341 #interrupt-cells = <1>;
342 #size-cells = <2>;
343 #address-cells = <3>;
344 reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */
345 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
346 reg-names = "csr", "cfg";
347 ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
348 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
349 dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
350 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
351 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
352 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
353 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
354 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
355 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
356 dma-coherent;
357 clocks = <&pcie0clk 0>;
358 };
359
360 pcie1: pcie@1f2c0000 {
361 status = "disabled";
362 device_type = "pci";
363 compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
364 #interrupt-cells = <1>;
365 #size-cells = <2>;
366 #address-cells = <3>;
367 reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */
368 0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */
369 reg-names = "csr", "cfg";
370 ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */
371 0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */
372 dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
373 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
374 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
375 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x1
376 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x1
377 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x1
378 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
379 dma-coherent;
380 clocks = <&pcie1clk 0>;
381 };
382
383 pcie2: pcie@1f2d0000 {
384 status = "disabled";
385 device_type = "pci";
386 compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
387 #interrupt-cells = <1>;
388 #size-cells = <2>;
389 #address-cells = <3>;
390 reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */
391 0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */
392 reg-names = "csr", "cfg";
393 ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000 /* io */
394 0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */
395 dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
396 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
397 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
398 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x1
399 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x1
400 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x1
401 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
402 dma-coherent;
403 clocks = <&pcie2clk 0>;
404 };
405
406 pcie3: pcie@1f500000 {
407 status = "disabled";
408 device_type = "pci";
409 compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
410 #interrupt-cells = <1>;
411 #size-cells = <2>;
412 #address-cells = <3>;
413 reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */
414 0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */
415 reg-names = "csr", "cfg";
416 ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000 /* io */
417 0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem */
418 dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
419 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
420 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
421 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x1
422 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x1
423 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x1
424 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
425 dma-coherent;
426 clocks = <&pcie3clk 0>;
427 };
428
429 pcie4: pcie@1f510000 {
430 status = "disabled";
431 device_type = "pci";
432 compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie";
433 #interrupt-cells = <1>;
434 #size-cells = <2>;
435 #address-cells = <3>;
436 reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */
437 0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */
438 reg-names = "csr", "cfg";
439 ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000 /* io */
440 0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */
441 dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
442 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
443 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
444 interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x1
445 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x1
446 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x1
447 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
448 dma-coherent;
449 clocks = <&pcie4clk 0>;
285 }; 450 };
286 451
287 serial0: serial@1c020000 { 452 serial0: serial@1c020000 {
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index c1968475cc4e..774a7c85e70f 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += mman.h
29generic-y += msgbuf.h 29generic-y += msgbuf.h
30generic-y += mutex.h 30generic-y += mutex.h
31generic-y += pci.h 31generic-y += pci.h
32generic-y += pci-bridge.h
32generic-y += poll.h 33generic-y += poll.h
33generic-y += preempt.h 34generic-y += preempt.h
34generic-y += resource.h 35generic-y += resource.h
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index f771e8bcad4a..79f1d519221f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,7 +121,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
121/* 121/*
122 * I/O port access primitives. 122 * I/O port access primitives.
123 */ 123 */
124#define IO_SPACE_LIMIT 0xffff 124#define arch_has_dev_port() (1)
125#define IO_SPACE_LIMIT (SZ_32M - 1)
125#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) 126#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
126 127
127static inline u8 inb(unsigned long addr) 128static inline u8 inb(unsigned long addr)
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
new file mode 100644
index 000000000000..872ba939fcb2
--- /dev/null
+++ b/arch/arm64/include/asm/pci.h
@@ -0,0 +1,37 @@
1#ifndef __ASM_PCI_H
2#define __ASM_PCI_H
3#ifdef __KERNEL__
4
5#include <linux/types.h>
6#include <linux/slab.h>
7#include <linux/dma-mapping.h>
8
9#include <asm/io.h>
10#include <asm-generic/pci-bridge.h>
11#include <asm-generic/pci-dma-compat.h>
12
13#define PCIBIOS_MIN_IO 0x1000
14#define PCIBIOS_MIN_MEM 0
15
16/*
17 * Set to 1 if the kernel should re-assign all PCI bus numbers
18 */
19#define pcibios_assign_all_busses() \
20 (pci_has_flag(PCI_REASSIGN_ALL_BUS))
21
22/*
23 * PCI address space differs from physical memory address space
24 */
25#define PCI_DMA_BUS_IS_PHYS (0)
26
27extern int isa_dma_bridge_buggy;
28
29#ifdef CONFIG_PCI
30static inline int pci_proc_domain(struct pci_bus *bus)
31{
32 return 1;
33}
34#endif /* CONFIG_PCI */
35
36#endif /* __KERNEL__ */
37#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d58e40cde88e..77dbe1e6398d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -301,6 +301,8 @@ static inline int has_transparent_hugepage(void)
301 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) 301 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
302#define pgprot_writecombine(prot) \ 302#define pgprot_writecombine(prot) \
303 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 303 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
304#define pgprot_device(prot) \
305 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
304#define __HAVE_PHYS_MEM_ACCESS_PROT 306#define __HAVE_PHYS_MEM_ACCESS_PROT
305struct file; 307struct file;
306extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 308extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 6e9538c2d28a..5bd029b43644 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,6 +30,7 @@ arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
30arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o 30arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
31arm64-obj-$(CONFIG_KGDB) += kgdb.o 31arm64-obj-$(CONFIG_KGDB) += kgdb.o
32arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o 32arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
33arm64-obj-$(CONFIG_PCI) += pci.o
33 34
34obj-y += $(arm64-obj-y) vdso/ 35obj-y += $(arm64-obj-y) vdso/
35obj-m += $(arm64-obj-m) 36obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
new file mode 100644
index 000000000000..ce5836c14ec1
--- /dev/null
+++ b/arch/arm64/kernel/pci.c
@@ -0,0 +1,70 @@
1/*
2 * Code borrowed from powerpc/kernel/pci-common.c
3 *
4 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
5 * Copyright (C) 2014 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/init.h>
14#include <linux/io.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/of_pci.h>
18#include <linux/of_platform.h>
19#include <linux/slab.h>
20
21#include <asm/pci-bridge.h>
22
23/*
24 * Called after each bus is probed, but before its children are examined
25 */
26void pcibios_fixup_bus(struct pci_bus *bus)
27{
28 /* nothing to do, expected to be removed in the future */
29}
30
31/*
32 * We don't have to worry about legacy ISA devices, so nothing to do here
33 */
34resource_size_t pcibios_align_resource(void *data, const struct resource *res,
35 resource_size_t size, resource_size_t align)
36{
37 return res->start;
38}
39
40/*
41 * Try to assign the IRQ number from DT when adding a new device
42 */
43int pcibios_add_device(struct pci_dev *dev)
44{
45 dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
46
47 return 0;
48}
49
50
51#ifdef CONFIG_PCI_DOMAINS_GENERIC
52static bool dt_domain_found = false;
53
54void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
55{
56 int domain = of_get_pci_domain_nr(parent->of_node);
57
58 if (domain >= 0) {
59 dt_domain_found = true;
60 } else if (dt_domain_found == true) {
61 dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
62 parent->of_node->full_name);
63 return;
64 } else {
65 domain = pci_get_new_domain_nr();
66 }
67
68 bus->domain_nr = domain;
69}
70#endif
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index c430f9198d1b..8c3730c3c63d 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -23,7 +23,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
23 if (irq_prepare_move(irq, cpu)) 23 if (irq_prepare_move(irq, cpu))
24 return -1; 24 return -1;
25 25
26 get_cached_msi_msg(irq, &msg); 26 __get_cached_msi_msg(idata->msi_desc, &msg);
27 27
28 addr = msg.address_lo; 28 addr = msg.address_lo;
29 addr &= MSI_ADDR_DEST_ID_MASK; 29 addr &= MSI_ADDR_DEST_ID_MASK;
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index afc58d2799ad..446e7799928c 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -175,8 +175,8 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
175 * Release XIO resources for the old MSI PCI address 175 * Release XIO resources for the old MSI PCI address
176 */ 176 */
177 177
178 get_cached_msi_msg(irq, &msg); 178 __get_cached_msi_msg(data->msi_desc, &msg);
179 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; 179 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
180 pdev = sn_pdev->pdi_linux_pcidev; 180 pdev = sn_pdev->pdi_linux_pcidev;
181 provider = SN_PCIDEV_BUSPROVIDER(pdev); 181 provider = SN_PCIDEV_BUSPROVIDER(pdev);
182 182
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index ab0c5d14c6f7..63bbe07a1ccd 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -73,8 +73,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
73 * wants. Most devices only want 1, which will give 73 * wants. Most devices only want 1, which will give
74 * configured_private_bits and request_private_bits equal 0. 74 * configured_private_bits and request_private_bits equal 0.
75 */ 75 */
76 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, 76 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
77 &control);
78 77
79 /* 78 /*
80 * If the number of private bits has been configured then use 79 * If the number of private bits has been configured then use
@@ -176,8 +175,7 @@ msi_irq_allocated:
176 /* Update the number of IRQs the device has available to it */ 175 /* Update the number of IRQs the device has available to it */
177 control &= ~PCI_MSI_FLAGS_QSIZE; 176 control &= ~PCI_MSI_FLAGS_QSIZE;
178 control |= request_private_bits << 4; 177 control |= request_private_bits << 4;
179 pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, 178 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
180 control);
181 179
182 irq_set_msi_desc(irq, desc); 180 irq_set_msi_desc(irq, desc);
183 write_msi_msg(irq, &msg); 181 write_msi_msg(irq, &msg);
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index b125ceab149c..3af721633618 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -136,8 +136,6 @@ struct machdep_calls {
136 int (*pci_setup_phb)(struct pci_controller *host); 136 int (*pci_setup_phb)(struct pci_controller *host);
137 137
138#ifdef CONFIG_PCI_MSI 138#ifdef CONFIG_PCI_MSI
139 int (*msi_check_device)(struct pci_dev* dev,
140 int nvec, int type);
141 int (*setup_msi_irqs)(struct pci_dev *dev, 139 int (*setup_msi_irqs)(struct pci_dev *dev,
142 int nvec, int type); 140 int nvec, int type);
143 void (*teardown_msi_irqs)(struct pci_dev *dev); 141 void (*teardown_msi_irqs)(struct pci_dev *dev);
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index 8bbc12d20f5c..71bd161640cf 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -13,7 +13,7 @@
13 13
14#include <asm/machdep.h> 14#include <asm/machdep.h>
15 15
16int arch_msi_check_device(struct pci_dev* dev, int nvec, int type) 16int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
17{ 17{
18 if (!ppc_md.setup_msi_irqs || !ppc_md.teardown_msi_irqs) { 18 if (!ppc_md.setup_msi_irqs || !ppc_md.teardown_msi_irqs) {
19 pr_debug("msi: Platform doesn't provide MSI callbacks.\n"); 19 pr_debug("msi: Platform doesn't provide MSI callbacks.\n");
@@ -24,16 +24,6 @@ int arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
24 if (type == PCI_CAP_ID_MSI && nvec > 1) 24 if (type == PCI_CAP_ID_MSI && nvec > 1)
25 return 1; 25 return 1;
26 26
27 if (ppc_md.msi_check_device) {
28 pr_debug("msi: Using platform check routine.\n");
29 return ppc_md.msi_check_device(dev, nvec, type);
30 }
31
32 return 0;
33}
34
35int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
36{
37 return ppc_md.setup_msi_irqs(dev, nvec, type); 27 return ppc_md.setup_msi_irqs(dev, nvec, type);
38} 28}
39 29
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 85825b5401e5..862b32702d29 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -199,14 +199,6 @@ out_error:
199 return msic; 199 return msic;
200} 200}
201 201
202static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
203{
204 if (!find_msi_translator(dev))
205 return -ENODEV;
206
207 return 0;
208}
209
210static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) 202static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
211{ 203{
212 struct device_node *dn; 204 struct device_node *dn;
@@ -416,7 +408,6 @@ static int axon_msi_probe(struct platform_device *device)
416 408
417 ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs; 409 ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
418 ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs; 410 ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
419 ppc_md.msi_check_device = axon_msi_check_device;
420 411
421 axon_msi_debug_setup(dn, msic); 412 axon_msi_debug_setup(dn, msic);
422 413
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b854b57ed5e1..b45c49249a5d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -46,29 +46,21 @@
46//#define cfg_dbg(fmt...) printk(fmt) 46//#define cfg_dbg(fmt...) printk(fmt)
47 47
48#ifdef CONFIG_PCI_MSI 48#ifdef CONFIG_PCI_MSI
49static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
50{
51 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
52 struct pnv_phb *phb = hose->private_data;
53 struct pci_dn *pdn = pci_get_pdn(pdev);
54
55 if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
56 return -ENODEV;
57
58 return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
59}
60
61static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 49static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
62{ 50{
63 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 51 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
64 struct pnv_phb *phb = hose->private_data; 52 struct pnv_phb *phb = hose->private_data;
53 struct pci_dn *pdn = pci_get_pdn(pdev);
65 struct msi_desc *entry; 54 struct msi_desc *entry;
66 struct msi_msg msg; 55 struct msi_msg msg;
67 int hwirq; 56 int hwirq;
68 unsigned int virq; 57 unsigned int virq;
69 int rc; 58 int rc;
70 59
71 if (WARN_ON(!phb)) 60 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
61 return -ENODEV;
62
63 if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
72 return -ENODEV; 64 return -ENODEV;
73 65
74 list_for_each_entry(entry, &pdev->msi_list, list) { 66 list_for_each_entry(entry, &pdev->msi_list, list) {
@@ -860,7 +852,6 @@ void __init pnv_pci_init(void)
860 852
861 /* Configure MSIs */ 853 /* Configure MSIs */
862#ifdef CONFIG_PCI_MSI 854#ifdef CONFIG_PCI_MSI
863 ppc_md.msi_check_device = pnv_msi_check_device;
864 ppc_md.setup_msi_irqs = pnv_setup_msi_irqs; 855 ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
865 ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs; 856 ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
866#endif 857#endif
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 18ff4626d74e..8ab5add4ac82 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -336,26 +336,6 @@ out:
336 return request; 336 return request;
337} 337}
338 338
339static int rtas_msi_check_device(struct pci_dev *pdev, int nvec, int type)
340{
341 int quota, rc;
342
343 if (type == PCI_CAP_ID_MSIX)
344 rc = check_req_msix(pdev, nvec);
345 else
346 rc = check_req_msi(pdev, nvec);
347
348 if (rc)
349 return rc;
350
351 quota = msi_quota_for_device(pdev, nvec);
352
353 if (quota && quota < nvec)
354 return quota;
355
356 return 0;
357}
358
359static int check_msix_entries(struct pci_dev *pdev) 339static int check_msix_entries(struct pci_dev *pdev)
360{ 340{
361 struct msi_desc *entry; 341 struct msi_desc *entry;
@@ -397,15 +377,24 @@ static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
397static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) 377static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
398{ 378{
399 struct pci_dn *pdn; 379 struct pci_dn *pdn;
400 int hwirq, virq, i, rc; 380 int hwirq, virq, i, quota, rc;
401 struct msi_desc *entry; 381 struct msi_desc *entry;
402 struct msi_msg msg; 382 struct msi_msg msg;
403 int nvec = nvec_in; 383 int nvec = nvec_in;
404 int use_32bit_msi_hack = 0; 384 int use_32bit_msi_hack = 0;
405 385
406 pdn = pci_get_pdn(pdev); 386 if (type == PCI_CAP_ID_MSIX)
407 if (!pdn) 387 rc = check_req_msix(pdev, nvec);
408 return -ENODEV; 388 else
389 rc = check_req_msi(pdev, nvec);
390
391 if (rc)
392 return rc;
393
394 quota = msi_quota_for_device(pdev, nvec);
395
396 if (quota && quota < nvec)
397 return quota;
409 398
410 if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev)) 399 if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev))
411 return -EINVAL; 400 return -EINVAL;
@@ -416,12 +405,14 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
416 */ 405 */
417 if (type == PCI_CAP_ID_MSIX) { 406 if (type == PCI_CAP_ID_MSIX) {
418 int m = roundup_pow_of_two(nvec); 407 int m = roundup_pow_of_two(nvec);
419 int quota = msi_quota_for_device(pdev, m); 408 quota = msi_quota_for_device(pdev, m);
420 409
421 if (quota >= m) 410 if (quota >= m)
422 nvec = m; 411 nvec = m;
423 } 412 }
424 413
414 pdn = pci_get_pdn(pdev);
415
425 /* 416 /*
426 * Try the new more explicit firmware interface, if that fails fall 417 * Try the new more explicit firmware interface, if that fails fall
427 * back to the old interface. The old interface is known to never 418 * back to the old interface. The old interface is known to never
@@ -485,7 +476,7 @@ again:
485 irq_set_msi_desc(virq, entry); 476 irq_set_msi_desc(virq, entry);
486 477
487 /* Read config space back so we can restore after reset */ 478 /* Read config space back so we can restore after reset */
488 read_msi_msg(virq, &msg); 479 __read_msi_msg(entry, &msg);
489 entry->msg = msg; 480 entry->msg = msg;
490 } 481 }
491 482
@@ -526,7 +517,6 @@ static int rtas_msi_init(void)
526 WARN_ON(ppc_md.setup_msi_irqs); 517 WARN_ON(ppc_md.setup_msi_irqs);
527 ppc_md.setup_msi_irqs = rtas_setup_msi_irqs; 518 ppc_md.setup_msi_irqs = rtas_setup_msi_irqs;
528 ppc_md.teardown_msi_irqs = rtas_teardown_msi_irqs; 519 ppc_md.teardown_msi_irqs = rtas_teardown_msi_irqs;
529 ppc_md.msi_check_device = rtas_msi_check_device;
530 520
531 WARN_ON(ppc_md.pci_irq_fixup); 521 WARN_ON(ppc_md.pci_irq_fixup);
532 ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup; 522 ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 77efbaec7b9c..b32e79dbef4f 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -109,14 +109,6 @@ static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
109 return 0; 109 return 0;
110} 110}
111 111
112static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type)
113{
114 if (type == PCI_CAP_ID_MSIX)
115 pr_debug("fslmsi: MSI-X untested, trying anyway.\n");
116
117 return 0;
118}
119
120static void fsl_teardown_msi_irqs(struct pci_dev *pdev) 112static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
121{ 113{
122 struct msi_desc *entry; 114 struct msi_desc *entry;
@@ -173,6 +165,9 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
173 struct msi_msg msg; 165 struct msi_msg msg;
174 struct fsl_msi *msi_data; 166 struct fsl_msi *msi_data;
175 167
168 if (type == PCI_CAP_ID_MSIX)
169 pr_debug("fslmsi: MSI-X untested, trying anyway.\n");
170
176 /* 171 /*
177 * If the PCI node has an fsl,msi property, then we need to use it 172 * If the PCI node has an fsl,msi property, then we need to use it
178 * to find the specific MSI. 173 * to find the specific MSI.
@@ -527,7 +522,6 @@ static int fsl_of_msi_probe(struct platform_device *dev)
527 if (!ppc_md.setup_msi_irqs) { 522 if (!ppc_md.setup_msi_irqs) {
528 ppc_md.setup_msi_irqs = fsl_setup_msi_irqs; 523 ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
529 ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs; 524 ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
530 ppc_md.msi_check_device = fsl_msi_check_device;
531 } else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) { 525 } else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
532 dev_err(&dev->dev, "Different MSI driver already installed!\n"); 526 dev_err(&dev->dev, "Different MSI driver already installed!\n");
533 err = -ENODEV; 527 err = -ENODEV;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 38e62382070c..15dccd35fa11 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -63,14 +63,6 @@ static struct irq_chip mpic_pasemi_msi_chip = {
63 .name = "PASEMI-MSI", 63 .name = "PASEMI-MSI",
64}; 64};
65 65
66static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
67{
68 if (type == PCI_CAP_ID_MSIX)
69 pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
70
71 return 0;
72}
73
74static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) 66static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
75{ 67{
76 struct msi_desc *entry; 68 struct msi_desc *entry;
@@ -97,6 +89,8 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
97 struct msi_msg msg; 89 struct msi_msg msg;
98 int hwirq; 90 int hwirq;
99 91
92 if (type == PCI_CAP_ID_MSIX)
93 pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
100 pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n", 94 pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
101 pdev, nvec, type); 95 pdev, nvec, type);
102 96
@@ -169,7 +163,6 @@ int mpic_pasemi_msi_init(struct mpic *mpic)
169 WARN_ON(ppc_md.setup_msi_irqs); 163 WARN_ON(ppc_md.setup_msi_irqs);
170 ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs; 164 ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
171 ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs; 165 ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
172 ppc_md.msi_check_device = pasemi_msi_check_device;
173 166
174 return 0; 167 return 0;
175} 168}
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 9a7aa0ed9c1c..623d7fba15b4 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -105,22 +105,6 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
105 return 0; 105 return 0;
106} 106}
107 107
108static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
109{
110 if (type == PCI_CAP_ID_MSIX)
111 pr_debug("u3msi: MSI-X untested, trying anyway.\n");
112
113 /* If we can't find a magic address then MSI ain't gonna work */
114 if (find_ht_magic_addr(pdev, 0) == 0 &&
115 find_u4_magic_addr(pdev, 0) == 0) {
116 pr_debug("u3msi: no magic address found for %s\n",
117 pci_name(pdev));
118 return -ENXIO;
119 }
120
121 return 0;
122}
123
124static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) 108static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
125{ 109{
126 struct msi_desc *entry; 110 struct msi_desc *entry;
@@ -146,6 +130,17 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
146 u64 addr; 130 u64 addr;
147 int hwirq; 131 int hwirq;
148 132
133 if (type == PCI_CAP_ID_MSIX)
134 pr_debug("u3msi: MSI-X untested, trying anyway.\n");
135
136 /* If we can't find a magic address then MSI ain't gonna work */
137 if (find_ht_magic_addr(pdev, 0) == 0 &&
138 find_u4_magic_addr(pdev, 0) == 0) {
139 pr_debug("u3msi: no magic address found for %s\n",
140 pci_name(pdev));
141 return -ENXIO;
142 }
143
149 list_for_each_entry(entry, &pdev->msi_list, list) { 144 list_for_each_entry(entry, &pdev->msi_list, list) {
150 hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); 145 hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
151 if (hwirq < 0) { 146 if (hwirq < 0) {
@@ -202,7 +197,6 @@ int mpic_u3msi_init(struct mpic *mpic)
202 WARN_ON(ppc_md.setup_msi_irqs); 197 WARN_ON(ppc_md.setup_msi_irqs);
203 ppc_md.setup_msi_irqs = u3msi_setup_msi_irqs; 198 ppc_md.setup_msi_irqs = u3msi_setup_msi_irqs;
204 ppc_md.teardown_msi_irqs = u3msi_teardown_msi_irqs; 199 ppc_md.teardown_msi_irqs = u3msi_teardown_msi_irqs;
205 ppc_md.msi_check_device = u3msi_msi_check_device;
206 200
207 return 0; 201 return 0;
208} 202}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 11c888416f0a..a6a4dbda9078 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -44,6 +44,12 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
44 int irq, hwirq; 44 int irq, hwirq;
45 u64 addr; 45 u64 addr;
46 46
47 /* We don't support MSI-X */
48 if (type == PCI_CAP_ID_MSIX) {
49 pr_debug("%s: MSI-X not supported.\n", __func__);
50 return -EINVAL;
51 }
52
47 list_for_each_entry(entry, &dev->msi_list, list) { 53 list_for_each_entry(entry, &dev->msi_list, list) {
48 irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1); 54 irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
49 if (irq < 0) { 55 if (irq < 0) {
@@ -117,17 +123,6 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev)
117 } 123 }
118} 124}
119 125
120static int hsta_msi_check_device(struct pci_dev *pdev, int nvec, int type)
121{
122 /* We don't support MSI-X */
123 if (type == PCI_CAP_ID_MSIX) {
124 pr_debug("%s: MSI-X not supported.\n", __func__);
125 return -EINVAL;
126 }
127
128 return 0;
129}
130
131static int hsta_msi_probe(struct platform_device *pdev) 126static int hsta_msi_probe(struct platform_device *pdev)
132{ 127{
133 struct device *dev = &pdev->dev; 128 struct device *dev = &pdev->dev;
@@ -178,7 +173,6 @@ static int hsta_msi_probe(struct platform_device *pdev)
178 173
179 ppc_md.setup_msi_irqs = hsta_setup_msi_irqs; 174 ppc_md.setup_msi_irqs = hsta_setup_msi_irqs;
180 ppc_md.teardown_msi_irqs = hsta_teardown_msi_irqs; 175 ppc_md.teardown_msi_irqs = hsta_teardown_msi_irqs;
181 ppc_md.msi_check_device = hsta_msi_check_device;
182 return 0; 176 return 0;
183 177
184out2: 178out2:
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 43948da837a7..22b5200636e7 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -85,8 +85,12 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
85 struct msi_desc *entry; 85 struct msi_desc *entry;
86 struct ppc4xx_msi *msi_data = &ppc4xx_msi; 86 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
87 87
88 msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), 88 dev_dbg(&dev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
89 GFP_KERNEL); 89 __func__, nvec, type);
90 if (type == PCI_CAP_ID_MSIX)
91 pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
92
93 msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), GFP_KERNEL);
90 if (!msi_data->msi_virqs) 94 if (!msi_data->msi_virqs)
91 return -ENOMEM; 95 return -ENOMEM;
92 96
@@ -134,16 +138,6 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
134 } 138 }
135} 139}
136 140
137static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type)
138{
139 dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
140 __func__, nvec, type);
141 if (type == PCI_CAP_ID_MSIX)
142 pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
143
144 return 0;
145}
146
147static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, 141static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
148 struct resource res, struct ppc4xx_msi *msi) 142 struct resource res, struct ppc4xx_msi *msi)
149{ 143{
@@ -259,7 +253,6 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
259 253
260 ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs; 254 ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
261 ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs; 255 ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
262 ppc_md.msi_check_device = ppc4xx_msi_check_device;
263 return err; 256 return err;
264 257
265error_out: 258error_out:
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 059a76c29739..7b20bccf3648 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -81,14 +81,14 @@ struct pci_ops pci_root_ops = {
81 */ 81 */
82DEFINE_RAW_SPINLOCK(pci_config_lock); 82DEFINE_RAW_SPINLOCK(pci_config_lock);
83 83
84static int can_skip_ioresource_align(const struct dmi_system_id *d) 84static int __init can_skip_ioresource_align(const struct dmi_system_id *d)
85{ 85{
86 pci_probe |= PCI_CAN_SKIP_ISA_ALIGN; 86 pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
87 printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident); 87 printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
88 return 0; 88 return 0;
89} 89}
90 90
91static const struct dmi_system_id can_skip_pciprobe_dmi_table[] = { 91static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __initconst = {
92/* 92/*
93 * Systems where PCI IO resource ISA alignment can be skipped 93 * Systems where PCI IO resource ISA alignment can be skipped
94 * when the ISA enable bit in the bridge control is not set 94 * when the ISA enable bit in the bridge control is not set
@@ -186,7 +186,7 @@ void pcibios_remove_bus(struct pci_bus *bus)
186 * on the kernel command line (which was parsed earlier). 186 * on the kernel command line (which was parsed earlier).
187 */ 187 */
188 188
189static int set_bf_sort(const struct dmi_system_id *d) 189static int __init set_bf_sort(const struct dmi_system_id *d)
190{ 190{
191 if (pci_bf_sort == pci_bf_sort_default) { 191 if (pci_bf_sort == pci_bf_sort_default) {
192 pci_bf_sort = pci_dmi_bf; 192 pci_bf_sort = pci_dmi_bf;
@@ -195,8 +195,8 @@ static int set_bf_sort(const struct dmi_system_id *d)
195 return 0; 195 return 0;
196} 196}
197 197
198static void read_dmi_type_b1(const struct dmi_header *dm, 198static void __init read_dmi_type_b1(const struct dmi_header *dm,
199 void *private_data) 199 void *private_data)
200{ 200{
201 u8 *d = (u8 *)dm + 4; 201 u8 *d = (u8 *)dm + 4;
202 202
@@ -217,7 +217,7 @@ static void read_dmi_type_b1(const struct dmi_header *dm,
217 } 217 }
218} 218}
219 219
220static int find_sort_method(const struct dmi_system_id *d) 220static int __init find_sort_method(const struct dmi_system_id *d)
221{ 221{
222 dmi_walk(read_dmi_type_b1, NULL); 222 dmi_walk(read_dmi_type_b1, NULL);
223 223
@@ -232,7 +232,7 @@ static int find_sort_method(const struct dmi_system_id *d)
232 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) 232 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
233 */ 233 */
234#ifdef __i386__ 234#ifdef __i386__
235static int assign_all_busses(const struct dmi_system_id *d) 235static int __init assign_all_busses(const struct dmi_system_id *d)
236{ 236{
237 pci_probe |= PCI_ASSIGN_ALL_BUSSES; 237 pci_probe |= PCI_ASSIGN_ALL_BUSSES;
238 printk(KERN_INFO "%s detected: enabling PCI bus# renumbering" 238 printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
@@ -241,7 +241,7 @@ static int assign_all_busses(const struct dmi_system_id *d)
241} 241}
242#endif 242#endif
243 243
244static int set_scan_all(const struct dmi_system_id *d) 244static int __init set_scan_all(const struct dmi_system_id *d)
245{ 245{
246 printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n", 246 printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
247 d->ident); 247 d->ident);
@@ -249,7 +249,7 @@ static int set_scan_all(const struct dmi_system_id *d)
249 return 0; 249 return 0;
250} 250}
251 251
252static const struct dmi_system_id pciprobe_dmi_table[] = { 252static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
253#ifdef __i386__ 253#ifdef __i386__
254/* 254/*
255 * Laptops which need pci=assign-busses to see Cardbus cards 255 * Laptops which need pci=assign-busses to see Cardbus cards
@@ -512,7 +512,7 @@ int __init pcibios_init(void)
512 return 0; 512 return 0;
513} 513}
514 514
515char * __init pcibios_setup(char *str) 515char *__init pcibios_setup(char *str)
516{ 516{
517 if (!strcmp(str, "off")) { 517 if (!strcmp(str, "off")) {
518 pci_probe = 0; 518 pci_probe = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 248642f4bab7..326198a4434e 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -31,7 +31,7 @@ static DEFINE_MUTEX(pci_mmcfg_lock);
31 31
32LIST_HEAD(pci_mmcfg_list); 32LIST_HEAD(pci_mmcfg_list);
33 33
34static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg) 34static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
35{ 35{
36 if (cfg->res.parent) 36 if (cfg->res.parent)
37 release_resource(&cfg->res); 37 release_resource(&cfg->res);
@@ -39,7 +39,7 @@ static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
39 kfree(cfg); 39 kfree(cfg);
40} 40}
41 41
42static __init void free_all_mmcfg(void) 42static void __init free_all_mmcfg(void)
43{ 43{
44 struct pci_mmcfg_region *cfg, *tmp; 44 struct pci_mmcfg_region *cfg, *tmp;
45 45
@@ -93,7 +93,7 @@ static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
93 return new; 93 return new;
94} 94}
95 95
96static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, 96static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
97 int end, u64 addr) 97 int end, u64 addr)
98{ 98{
99 struct pci_mmcfg_region *new; 99 struct pci_mmcfg_region *new;
@@ -125,7 +125,7 @@ struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
125 return NULL; 125 return NULL;
126} 126}
127 127
128static const char __init *pci_mmcfg_e7520(void) 128static const char *__init pci_mmcfg_e7520(void)
129{ 129{
130 u32 win; 130 u32 win;
131 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); 131 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
@@ -140,7 +140,7 @@ static const char __init *pci_mmcfg_e7520(void)
140 return "Intel Corporation E7520 Memory Controller Hub"; 140 return "Intel Corporation E7520 Memory Controller Hub";
141} 141}
142 142
143static const char __init *pci_mmcfg_intel_945(void) 143static const char *__init pci_mmcfg_intel_945(void)
144{ 144{
145 u32 pciexbar, mask = 0, len = 0; 145 u32 pciexbar, mask = 0, len = 0;
146 146
@@ -184,7 +184,7 @@ static const char __init *pci_mmcfg_intel_945(void)
184 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; 184 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
185} 185}
186 186
187static const char __init *pci_mmcfg_amd_fam10h(void) 187static const char *__init pci_mmcfg_amd_fam10h(void)
188{ 188{
189 u32 low, high, address; 189 u32 low, high, address;
190 u64 base, msr; 190 u64 base, msr;
@@ -235,21 +235,25 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
235} 235}
236 236
237static bool __initdata mcp55_checked; 237static bool __initdata mcp55_checked;
238static const char __init *pci_mmcfg_nvidia_mcp55(void) 238static const char *__init pci_mmcfg_nvidia_mcp55(void)
239{ 239{
240 int bus; 240 int bus;
241 int mcp55_mmconf_found = 0; 241 int mcp55_mmconf_found = 0;
242 242
243 static const u32 extcfg_regnum = 0x90; 243 static const u32 extcfg_regnum __initconst = 0x90;
244 static const u32 extcfg_regsize = 4; 244 static const u32 extcfg_regsize __initconst = 4;
245 static const u32 extcfg_enable_mask = 1<<31; 245 static const u32 extcfg_enable_mask __initconst = 1 << 31;
246 static const u32 extcfg_start_mask = 0xff<<16; 246 static const u32 extcfg_start_mask __initconst = 0xff << 16;
247 static const int extcfg_start_shift = 16; 247 static const int extcfg_start_shift __initconst = 16;
248 static const u32 extcfg_size_mask = 0x3<<28; 248 static const u32 extcfg_size_mask __initconst = 0x3 << 28;
249 static const int extcfg_size_shift = 28; 249 static const int extcfg_size_shift __initconst = 28;
250 static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20}; 250 static const int extcfg_sizebus[] __initconst = {
251 static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff}; 251 0x100, 0x80, 0x40, 0x20
252 static const int extcfg_base_lshift = 25; 252 };
253 static const u32 extcfg_base_mask[] __initconst = {
254 0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
255 };
256 static const int extcfg_base_lshift __initconst = 25;
253 257
254 /* 258 /*
255 * do check if amd fam10h already took over 259 * do check if amd fam10h already took over
@@ -302,7 +306,7 @@ struct pci_mmcfg_hostbridge_probe {
302 const char *(*probe)(void); 306 const char *(*probe)(void);
303}; 307};
304 308
305static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { 309static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
306 { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, 310 { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
307 PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 }, 311 PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
308 { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, 312 { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index c77b24a8b2da..9b83b9051ae7 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -79,13 +79,13 @@ union bios32 {
79static struct { 79static struct {
80 unsigned long address; 80 unsigned long address;
81 unsigned short segment; 81 unsigned short segment;
82} bios32_indirect = { 0, __KERNEL_CS }; 82} bios32_indirect __initdata = { 0, __KERNEL_CS };
83 83
84/* 84/*
85 * Returns the entry point for the given service, NULL on error 85 * Returns the entry point for the given service, NULL on error
86 */ 86 */
87 87
88static unsigned long bios32_service(unsigned long service) 88static unsigned long __init bios32_service(unsigned long service)
89{ 89{
90 unsigned char return_code; /* %al */ 90 unsigned char return_code; /* %al */
91 unsigned long address; /* %ebx */ 91 unsigned long address; /* %ebx */
@@ -124,7 +124,7 @@ static struct {
124 124
125static int pci_bios_present; 125static int pci_bios_present;
126 126
127static int check_pcibios(void) 127static int __init check_pcibios(void)
128{ 128{
129 u32 signature, eax, ebx, ecx; 129 u32 signature, eax, ebx, ecx;
130 u8 status, major_ver, minor_ver, hw_mech; 130 u8 status, major_ver, minor_ver, hw_mech;
@@ -312,7 +312,7 @@ static const struct pci_raw_ops pci_bios_access = {
312 * Try to find PCI BIOS. 312 * Try to find PCI BIOS.
313 */ 313 */
314 314
315static const struct pci_raw_ops *pci_find_bios(void) 315static const struct pci_raw_ops *__init pci_find_bios(void)
316{ 316{
317 union bios32 *check; 317 union bios32 *check;
318 unsigned char sum; 318 unsigned char sum;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 11323dd5196f..e4259c2c1acc 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -35,7 +35,6 @@
35/* 35/*
36 * PCI device IDs. 36 * PCI device IDs.
37 */ 37 */
38#define PCI_VENDOR_ID_VMWARE 0x15AD
39#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 38#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
40 39
41/* 40/*
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index fa75a29a0408..3e238cd049e6 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -136,6 +136,10 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
136 struct msi_msg msg; 136 struct msi_msg msg;
137 int virq, hwirq; 137 int virq, hwirq;
138 138
139 /* We support MSI, but not MSI-X */
140 if (desc->msi_attrib.is_msix)
141 return -EINVAL;
142
139 hwirq = armada_370_xp_alloc_msi(); 143 hwirq = armada_370_xp_alloc_msi();
140 if (hwirq < 0) 144 if (hwirq < 0)
141 return hwirq; 145 return hwirq;
@@ -166,15 +170,6 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
166 armada_370_xp_free_msi(hwirq); 170 armada_370_xp_free_msi(hwirq);
167} 171}
168 172
169static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
170 int nvec, int type)
171{
172 /* We support MSI, but not MSI-X */
173 if (type == PCI_CAP_ID_MSI)
174 return 0;
175 return -EINVAL;
176}
177
178static struct irq_chip armada_370_xp_msi_irq_chip = { 173static struct irq_chip armada_370_xp_msi_irq_chip = {
179 .name = "armada_370_xp_msi_irq", 174 .name = "armada_370_xp_msi_irq",
180 .irq_enable = unmask_msi_irq, 175 .irq_enable = unmask_msi_irq,
@@ -213,7 +208,6 @@ static int armada_370_xp_msi_init(struct device_node *node,
213 208
214 msi_chip->setup_irq = armada_370_xp_setup_msi_irq; 209 msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
215 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; 210 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
216 msi_chip->check_device = armada_370_xp_check_msi_device;
217 msi_chip->of_node = node; 211 msi_chip->of_node = node;
218 212
219 armada_370_xp_msi_domain = 213 armada_370_xp_msi_domain =
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 248399a881af..189b32519748 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -35,7 +35,6 @@
35#include "vmci_driver.h" 35#include "vmci_driver.h"
36#include "vmci_event.h" 36#include "vmci_event.h"
37 37
38#define PCI_VENDOR_ID_VMWARE 0x15AD
39#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 38#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
40 39
41#define VMCI_UTIL_NUM_RESOURCES 1 40#define VMCI_UTIL_NUM_RESOURCES 1
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3759479f959a..5f0199f6c31e 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -117,7 +117,6 @@ enum {
117/* 117/*
118 * PCI vendor and device IDs. 118 * PCI vendor and device IDs.
119 */ 119 */
120#define PCI_VENDOR_ID_VMWARE 0x15AD
121#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 120#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
122#define MAX_ETHERNET_CARDS 10 121#define MAX_ETHERNET_CARDS 10
123#define MAX_PCI_PASSTHRU_DEVICE 6 122#define MAX_PCI_PASSTHRU_DEVICE 6
diff --git a/drivers/of/address.c b/drivers/of/address.c
index e3718250d66e..afdb78299f61 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -5,6 +5,8 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/of_address.h> 6#include <linux/of_address.h>
7#include <linux/pci_regs.h> 7#include <linux/pci_regs.h>
8#include <linux/sizes.h>
9#include <linux/slab.h>
8#include <linux/string.h> 10#include <linux/string.h>
9 11
10/* Max address size we deal with */ 12/* Max address size we deal with */
@@ -293,6 +295,51 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
293} 295}
294EXPORT_SYMBOL_GPL(of_pci_range_parser_one); 296EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
295 297
298/*
299 * of_pci_range_to_resource - Create a resource from an of_pci_range
300 * @range: the PCI range that describes the resource
301 * @np: device node where the range belongs to
302 * @res: pointer to a valid resource that will be updated to
303 * reflect the values contained in the range.
304 *
305 * Returns EINVAL if the range cannot be converted to resource.
306 *
307 * Note that if the range is an IO range, the resource will be converted
308 * using pci_address_to_pio() which can fail if it is called too early or
309 * if the range cannot be matched to any host bridge IO space (our case here).
310 * To guard against that we try to register the IO range first.
311 * If that fails we know that pci_address_to_pio() will do too.
312 */
313int of_pci_range_to_resource(struct of_pci_range *range,
314 struct device_node *np, struct resource *res)
315{
316 int err;
317 res->flags = range->flags;
318 res->parent = res->child = res->sibling = NULL;
319 res->name = np->full_name;
320
321 if (res->flags & IORESOURCE_IO) {
322 unsigned long port;
323 err = pci_register_io_range(range->cpu_addr, range->size);
324 if (err)
325 goto invalid_range;
326 port = pci_address_to_pio(range->cpu_addr);
327 if (port == (unsigned long)-1) {
328 err = -EINVAL;
329 goto invalid_range;
330 }
331 res->start = port;
332 } else {
333 res->start = range->cpu_addr;
334 }
335 res->end = res->start + range->size - 1;
336 return 0;
337
338invalid_range:
339 res->start = (resource_size_t)OF_BAD_ADDR;
340 res->end = (resource_size_t)OF_BAD_ADDR;
341 return err;
342}
296#endif /* CONFIG_PCI */ 343#endif /* CONFIG_PCI */
297 344
298/* 345/*
@@ -601,12 +648,119 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
601} 648}
602EXPORT_SYMBOL(of_get_address); 649EXPORT_SYMBOL(of_get_address);
603 650
651#ifdef PCI_IOBASE
652struct io_range {
653 struct list_head list;
654 phys_addr_t start;
655 resource_size_t size;
656};
657
658static LIST_HEAD(io_range_list);
659static DEFINE_SPINLOCK(io_range_lock);
660#endif
661
662/*
663 * Record the PCI IO range (expressed as CPU physical address + size).
664 * Return a negative value if an error has occured, zero otherwise
665 */
666int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
667{
668 int err = 0;
669
670#ifdef PCI_IOBASE
671 struct io_range *range;
672 resource_size_t allocated_size = 0;
673
674 /* check if the range hasn't been previously recorded */
675 spin_lock(&io_range_lock);
676 list_for_each_entry(range, &io_range_list, list) {
677 if (addr >= range->start && addr + size <= range->start + size) {
678 /* range already registered, bail out */
679 goto end_register;
680 }
681 allocated_size += range->size;
682 }
683
684 /* range not registed yet, check for available space */
685 if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
686 /* if it's too big check if 64K space can be reserved */
687 if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
688 err = -E2BIG;
689 goto end_register;
690 }
691
692 size = SZ_64K;
693 pr_warn("Requested IO range too big, new size set to 64K\n");
694 }
695
696 /* add the range to the list */
697 range = kzalloc(sizeof(*range), GFP_KERNEL);
698 if (!range) {
699 err = -ENOMEM;
700 goto end_register;
701 }
702
703 range->start = addr;
704 range->size = size;
705
706 list_add_tail(&range->list, &io_range_list);
707
708end_register:
709 spin_unlock(&io_range_lock);
710#endif
711
712 return err;
713}
714
715phys_addr_t pci_pio_to_address(unsigned long pio)
716{
717 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
718
719#ifdef PCI_IOBASE
720 struct io_range *range;
721 resource_size_t allocated_size = 0;
722
723 if (pio > IO_SPACE_LIMIT)
724 return address;
725
726 spin_lock(&io_range_lock);
727 list_for_each_entry(range, &io_range_list, list) {
728 if (pio >= allocated_size && pio < allocated_size + range->size) {
729 address = range->start + pio - allocated_size;
730 break;
731 }
732 allocated_size += range->size;
733 }
734 spin_unlock(&io_range_lock);
735#endif
736
737 return address;
738}
739
604unsigned long __weak pci_address_to_pio(phys_addr_t address) 740unsigned long __weak pci_address_to_pio(phys_addr_t address)
605{ 741{
742#ifdef PCI_IOBASE
743 struct io_range *res;
744 resource_size_t offset = 0;
745 unsigned long addr = -1;
746
747 spin_lock(&io_range_lock);
748 list_for_each_entry(res, &io_range_list, list) {
749 if (address >= res->start && address < res->start + res->size) {
750 addr = res->start - address + offset;
751 break;
752 }
753 offset += res->size;
754 }
755 spin_unlock(&io_range_lock);
756
757 return addr;
758#else
606 if (address > IO_SPACE_LIMIT) 759 if (address > IO_SPACE_LIMIT)
607 return (unsigned long)-1; 760 return (unsigned long)-1;
608 761
609 return (unsigned long) address; 762 return (unsigned long) address;
763#endif
610} 764}
611 765
612static int __of_address_to_resource(struct device_node *dev, 766static int __of_address_to_resource(struct device_node *dev,
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 848199633798..8882b467be95 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -1,7 +1,9 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/export.h> 2#include <linux/export.h>
3#include <linux/of.h> 3#include <linux/of.h>
4#include <linux/of_address.h>
4#include <linux/of_pci.h> 5#include <linux/of_pci.h>
6#include <linux/slab.h>
5 7
6static inline int __of_pci_pci_compare(struct device_node *node, 8static inline int __of_pci_pci_compare(struct device_node *node,
7 unsigned int data) 9 unsigned int data)
@@ -89,6 +91,146 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
89} 91}
90EXPORT_SYMBOL_GPL(of_pci_parse_bus_range); 92EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
91 93
94/**
95 * This function will try to obtain the host bridge domain number by
96 * finding a property called "linux,pci-domain" of the given device node.
97 *
98 * @node: device tree node with the domain information
99 *
100 * Returns the associated domain number from DT in the range [0-0xffff], or
101 * a negative value if the required property is not found.
102 */
103int of_get_pci_domain_nr(struct device_node *node)
104{
105 const __be32 *value;
106 int len;
107 u16 domain;
108
109 value = of_get_property(node, "linux,pci-domain", &len);
110 if (!value || len < sizeof(*value))
111 return -EINVAL;
112
113 domain = (u16)be32_to_cpup(value);
114
115 return domain;
116}
117EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
118
119#if defined(CONFIG_OF_ADDRESS)
120/**
121 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
122 * @dev: device node of the host bridge having the range property
123 * @busno: bus number associated with the bridge root bus
124 * @bus_max: maximum number of buses for this bridge
125 * @resources: list where the range of resources will be added after DT parsing
126 * @io_base: pointer to a variable that will contain on return the physical
127 * address for the start of the I/O range. Can be NULL if the caller doesn't
128 * expect IO ranges to be present in the device tree.
129 *
130 * It is the caller's job to free the @resources list.
131 *
132 * This function will parse the "ranges" property of a PCI host bridge device
133 * node and setup the resource mapping based on its content. It is expected
134 * that the property conforms with the Power ePAPR document.
135 *
136 * It returns zero if the range parsing has been successful or a standard error
137 * value if it failed.
138 */
139int of_pci_get_host_bridge_resources(struct device_node *dev,
140 unsigned char busno, unsigned char bus_max,
141 struct list_head *resources, resource_size_t *io_base)
142{
143 struct resource *res;
144 struct resource *bus_range;
145 struct of_pci_range range;
146 struct of_pci_range_parser parser;
147 char range_type[4];
148 int err;
149
150 if (io_base)
151 *io_base = (resource_size_t)OF_BAD_ADDR;
152
153 bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
154 if (!bus_range)
155 return -ENOMEM;
156
157 pr_info("PCI host bridge %s ranges:\n", dev->full_name);
158
159 err = of_pci_parse_bus_range(dev, bus_range);
160 if (err) {
161 bus_range->start = busno;
162 bus_range->end = bus_max;
163 bus_range->flags = IORESOURCE_BUS;
164 pr_info(" No bus range found for %s, using %pR\n",
165 dev->full_name, bus_range);
166 } else {
167 if (bus_range->end > bus_range->start + bus_max)
168 bus_range->end = bus_range->start + bus_max;
169 }
170 pci_add_resource(resources, bus_range);
171
172 /* Check for ranges property */
173 err = of_pci_range_parser_init(&parser, dev);
174 if (err)
175 goto parse_failed;
176
177 pr_debug("Parsing ranges property...\n");
178 for_each_of_pci_range(&parser, &range) {
179 /* Read next ranges element */
180 if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
181 snprintf(range_type, 4, " IO");
182 else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
183 snprintf(range_type, 4, "MEM");
184 else
185 snprintf(range_type, 4, "err");
186 pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
187 range.cpu_addr, range.cpu_addr + range.size - 1,
188 range.pci_addr);
189
190 /*
191 * If we failed translation or got a zero-sized region
192 * then skip this range
193 */
194 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
195 continue;
196
197 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
198 if (!res) {
199 err = -ENOMEM;
200 goto parse_failed;
201 }
202
203 err = of_pci_range_to_resource(&range, dev, res);
204 if (err)
205 goto conversion_failed;
206
207 if (resource_type(res) == IORESOURCE_IO) {
208 if (!io_base) {
209 pr_err("I/O range found for %s. Please provide an io_base pointer to save CPU base address\n",
210 dev->full_name);
211 err = -EINVAL;
212 goto conversion_failed;
213 }
214 if (*io_base != (resource_size_t)OF_BAD_ADDR)
215 pr_warn("More than one I/O resource converted for %s. CPU base address for old range lost!\n",
216 dev->full_name);
217 *io_base = range.cpu_addr;
218 }
219
220 pci_add_resource_offset(resources, res, res->start - range.pci_addr);
221 }
222
223 return 0;
224
225conversion_failed:
226 kfree(res);
227parse_failed:
228 pci_free_resource_list(resources);
229 return err;
230}
231EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
232#endif /* CONFIG_OF_ADDRESS */
233
92#ifdef CONFIG_PCI_MSI 234#ifdef CONFIG_PCI_MSI
93 235
94static LIST_HEAD(of_pci_msi_chip_list); 236static LIST_HEAD(of_pci_msi_chip_list);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 90f5ccacce4b..3dc25fad490c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -63,4 +63,32 @@ config PCIE_SPEAR13XX
63 help 63 help
64 Say Y here if you want PCIe support on SPEAr13XX SoCs. 64 Say Y here if you want PCIe support on SPEAr13XX SoCs.
65 65
66config PCI_KEYSTONE
67 bool "TI Keystone PCIe controller"
68 depends on ARCH_KEYSTONE
69 select PCIE_DW
70 select PCIEPORTBUS
71 help
72 Say Y here if you want to enable PCI controller support on Keystone
73 SoCs. The PCI controller on Keystone is based on Designware hardware
74 and therefore the driver re-uses the Designware core functions to
75 implement the driver.
76
77config PCIE_XILINX
78 bool "Xilinx AXI PCIe host bridge support"
79 depends on ARCH_ZYNQ
80 help
81 Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
82 Host Bridge driver.
83
84config PCI_XGENE
85 bool "X-Gene PCIe controller"
86 depends on ARCH_XGENE
87 depends on OF
88 select PCIEPORTBUS
89 help
90 Say Y here if you want internal PCI support on APM X-Gene SoC.
91 There are 5 internal PCIe ports available. Each port is GEN3 capable
92 and have varied lanes from x1 to x8.
93
66endmenu 94endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d0e88f114ff9..26b3461d68d7 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
8obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o 8obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
9obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o 9obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
10obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o 10obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
11obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 35fc73a8d0b3..233fe8a88264 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -257,11 +257,6 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
257 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); 257 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
258 int ret; 258 int ret;
259 259
260 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
261 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
262 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
263 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
264
265 ret = clk_prepare_enable(imx6_pcie->pcie_phy); 260 ret = clk_prepare_enable(imx6_pcie->pcie_phy);
266 if (ret) { 261 if (ret) {
267 dev_err(pp->dev, "unable to enable pcie_phy clock\n"); 262 dev_err(pp->dev, "unable to enable pcie_phy clock\n");
@@ -283,6 +278,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
283 /* allow the clocks to stabilize */ 278 /* allow the clocks to stabilize */
284 usleep_range(200, 500); 279 usleep_range(200, 500);
285 280
281 /* power up core phy and enable ref clock */
282 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
283 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
284 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
285 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
286
286 /* Some boards don't have PCIe reset GPIO. */ 287 /* Some boards don't have PCIe reset GPIO. */
287 if (gpio_is_valid(imx6_pcie->reset_gpio)) { 288 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
288 gpio_set_value(imx6_pcie->reset_gpio, 0); 289 gpio_set_value(imx6_pcie->reset_gpio, 0);
@@ -647,7 +648,7 @@ static int __init imx6_pcie_init(void)
647{ 648{
648 return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe); 649 return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
649} 650}
650fs_initcall(imx6_pcie_init); 651module_init(imx6_pcie_init);
651 652
652MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>"); 653MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
653MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver"); 654MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
new file mode 100644
index 000000000000..34086ce88e8e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -0,0 +1,516 @@
1/*
2 * Designware application register space functions for Keystone PCI controller
3 *
4 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
5 * http://www.ti.com
6 *
7 * Author: Murali Karicheri <m-karicheri2@ti.com>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/irq.h>
16#include <linux/irqdomain.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_pci.h>
20#include <linux/pci.h>
21#include <linux/platform_device.h>
22
23#include "pcie-designware.h"
24#include "pci-keystone.h"
25
26/* Application register defines */
27#define LTSSM_EN_VAL 1
28#define LTSSM_STATE_MASK 0x1f
29#define LTSSM_STATE_L0 0x11
30#define DBI_CS2_EN_VAL 0x20
31#define OB_XLAT_EN_VAL 2
32
33/* Application registers */
34#define CMD_STATUS 0x004
35#define CFG_SETUP 0x008
36#define OB_SIZE 0x030
37#define CFG_PCIM_WIN_SZ_IDX 3
38#define CFG_PCIM_WIN_CNT 32
39#define SPACE0_REMOTE_CFG_OFFSET 0x1000
40#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
41#define OB_OFFSET_HI(n) (0x204 + (8 * n))
42
43/* IRQ register defines */
44#define IRQ_EOI 0x050
45#define IRQ_STATUS 0x184
46#define IRQ_ENABLE_SET 0x188
47#define IRQ_ENABLE_CLR 0x18c
48
49#define MSI_IRQ 0x054
50#define MSI0_IRQ_STATUS 0x104
51#define MSI0_IRQ_ENABLE_SET 0x108
52#define MSI0_IRQ_ENABLE_CLR 0x10c
53#define IRQ_STATUS 0x184
54#define MSI_IRQ_OFFSET 4
55
56/* Config space registers */
57#define DEBUG0 0x728
58
59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
60
61static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
62{
63 return sys->private_data;
64}
65
/*
 * Split an MSI vector number into a register bank index (vector mod 8,
 * banks MSI0..MSI7) and a bit position within that bank (vector / 8).
 * This matches the layout used by ks_dw_pcie_handle_msi_irq(), where
 * status bank N carries vectors N, N+8, N+16 and N+24.
 */
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					 u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}
72
/*
 * ks_dw_pcie_get_msi_addr() - bus address endpoints target for MSI writes
 *
 * Returns the physical address of the MSI_IRQ register in the application
 * register space; ks_dw_pcie_v3_65_scan_bus() points BAR0 at this region so
 * inbound MSI writes reach it.
 */
u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	return ks_pcie->app.start + MSI_IRQ;
}
79
/*
 * ks_dw_pcie_handle_msi_irq() - demultiplex one MSI host interrupt
 * @ks_pcie: Keystone PCIe driver instance
 * @offset:  MSI host IRQ index (0..7), i.e. which MSI status bank to read
 *
 * Reads the pending bits of status bank @offset and dispatches the mapped
 * virtual IRQ for every pending vector.
 */
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending, vector;
	int src, virq;

	/* status banks are spaced 0x10 apart in the application space */
	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}
102
/*
 * ks_dw_pcie_msi_irq_ack() - irq_chip .irq_ack for MSI vectors
 *
 * Writes the vector's bit into its MSI status bank (presumably
 * write-1-to-clear -- TODO confirm against the Keystone TRM) and then EOIs
 * the corresponding MSI host interrupt line so further MSIs can be raised.
 */
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;

	/* recover the pcie_port via the MSI descriptor's bus sysdata */
	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	/* hwirq number relative to the base of the linear MSI domain */
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}
121
/* Enable an MSI vector in its MSI0..MSI7 interrupt-enable-set register. */
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
}
131
/* Disable an MSI vector via its MSI0..MSI7 interrupt-enable-clear register. */
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
}
141
/*
 * ks_dw_pcie_msi_irq_mask() - irq_chip .irq_mask for MSI vectors
 *
 * Masks the vector at the endpoint (if the device implements per-vector
 * masking) and disables it in the host controller's MSI enable registers.
 */
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	/* hwirq number relative to the base of the linear MSI domain */
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Mask the end point if PVM implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			mask_msi_irq(d);
	}

	ks_dw_pcie_msi_clear_irq(pp, offset);
}
163
/*
 * ks_dw_pcie_msi_irq_unmask() - irq_chip .irq_unmask for MSI vectors
 *
 * Mirror of ks_dw_pcie_msi_irq_mask(): unmasks at the endpoint (when
 * per-vector masking exists) and re-enables the vector in the host
 * controller.
 */
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	/* hwirq number relative to the base of the linear MSI domain */
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Mask the end point if PVM implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			unmask_msi_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}
185
/* irq_chip for the per-vector MSI interrupts (used with handle_level_irq) */
static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};
192
/*
 * irq_domain .map callback for MSI vectors: attach the MSI irq_chip with a
 * level-type flow handler and mark the virq valid so drivers may request it.
 */
static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};
207
/*
 * ks_dw_pcie_msi_host_init() - create the linear MSI IRQ domain
 * @pp:   DesignWare pcie_port
 * @chip: msi_chip stored as the domain's host data
 *
 * Creates a linear domain of MAX_MSI_IRQS vectors on the MSI interrupt
 * controller DT node and eagerly maps every vector. Returns 0 on success,
 * -ENXIO if the domain cannot be created.
 */
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	int i;

	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
					MAX_MSI_IRQS,
					&ks_dw_pcie_msi_domain_ops,
					chip);
	if (!pp->irq_domain) {
		dev_err(pp->dev, "irq domain init failed\n");
		return -ENXIO;
	}

	/* pre-map all vectors so revmap lookups in the handlers succeed */
	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(pp->irq_domain, i);

	return 0;
}
227
228void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
229{
230 int i;
231
232 for (i = 0; i < MAX_LEGACY_IRQS; i++)
233 writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
234}
235
/*
 * ks_dw_pcie_handle_legacy_irq() - demultiplex one legacy INTx interrupt
 * @ks_pcie: Keystone PCIe driver instance
 * @offset:  INTx line index (0..3)
 *
 * If line @offset is pending, dispatch its mapped virq, then always EOI the
 * line so the controller can raise it again.
 */
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending;
	int virq;

	/* per-line status registers are spaced 0x10 apart */
	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
			virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}
254
/*
 * The legacy INTx irq_chip callbacks are intentionally empty: status is
 * cleared and EOI'd centrally in ks_dw_pcie_handle_legacy_irq(), and the
 * host-side enables are set once by ks_dw_pcie_enable_legacy_irqs().
 */
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

/* Bind the (no-op) legacy irq_chip and a level flow handler to each virq. */
static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
289
/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @reg_virt: base of the application register space (holds CMD_STATUS)
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	/* read-modify-write: set DBI_CS2 without disturbing other bits */
	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	/* busy-wait until the bit reads back as set */
	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}
308
/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 * @reg_virt: base of the application register space (holds CMD_STATUS)
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	/* read-modify-write: clear DBI_CS2 without disturbing other bits */
	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	/* busy-wait until the bit reads back as clear */
	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}
326
/*
 * ks_dw_pcie_setup_rc_app_regs() - program the RC application registers
 * @ks_pcie: Keystone PCIe driver instance
 *
 * Disables the RC's own BARs (via the overlaid BAR mask registers in DBI
 * mode), then programs up to CFG_PCIM_WIN_CNT outbound translation windows
 * as a direct 1:1 mapping of the PCI memory resource and enables outbound
 * translation.
 */
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 start = pp->mem.start, end = pp->mem.end;
	int i, tr_size;

	/* Disable BARs for inbound access */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	/* Set outbound translation size per window division */
	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);

	/* window size in bytes: (1 << idx) MiB */
	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		/* bit 0 presumably enables the window -- TODO confirm in TRM */
		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
		start += tr_size;
	}

	/* Enable OB translation */
	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
	       ks_pcie->va_app_base + CMD_STATUS);
}
355
/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device is residing on
 * @devfn: device, function number info
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0. Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions to access configuration - local and remote.
 * We access local region for bus 0 (as RC is attached on bus 0) and remote
 * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
 * we will do TYPE 0 access as it will be on our secondary bus (logical).
 * CFG_SETUP is needed only for remote configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 regval;

	/* bus 0 is the RC itself: use the local DBI space directly */
	if (bus == 0)
		return pp->dbi_base;

	/* encode target bus/device/function for the remote config window */
	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Since Bus#1 will be a virtual bus, we need to have TYPE0
	 * access only.
	 * TYPE 1
	 */
	if (bus != 1)
		regval |= BIT(24);	/* select TYPE 1 config transactions */

	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
	return pp->va_cfg0_base;
}
396
397int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
398 unsigned int devfn, int where, int size, u32 *val)
399{
400 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
401 u8 bus_num = bus->number;
402 void __iomem *addr;
403
404 addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
405
406 return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
407}
408
409int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
410 unsigned int devfn, int where, int size, u32 val)
411{
412 struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
413 u8 bus_num = bus->number;
414 void __iomem *addr;
415
416 addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
417
418 return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
419}
420
/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);

	/*
	 * Enable BAR0: in DBI CS2 mode these writes hit the overlaid BAR
	 * mask register (enable bit, then a 4K aperture mask) -- presumed
	 * per DesignWare v3.65 convention, TODO confirm against databook.
	 */
	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);

	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
}
445
446/**
447 * ks_dw_pcie_link_up() - Check if link up
448 */
449int ks_dw_pcie_link_up(struct pcie_port *pp)
450{
451 u32 val = readl(pp->dbi_base + DEBUG0);
452
453 return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
454}
455
456void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
457{
458 u32 val;
459
460 /* Disable Link training */
461 val = readl(ks_pcie->va_app_base + CMD_STATUS);
462 val &= ~LTSSM_EN_VAL;
463 writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
464
465 /* Initiate Link Training */
466 val = readl(ks_pcie->va_app_base + CMD_STATUS);
467 writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
468}
469
/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 * @ks_pcie:     Keystone PCIe driver instance
 * @msi_intc_np: MSI interrupt controller device node (unused here; the MSI
 *               node is read from @ks_pcie by the msi_host_init callback)
 *
 * Ioremap the register resources, initialize legacy irq domain
 * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
 * PCI host controller.
 *
 * Returns 0 on success or a negative errno from resource mapping, domain
 * creation, or dw_pcie_host_init().
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/*
	 * We set these same and is used in pcie rd/wr_other_conf
	 * functions
	 */
	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->app = *res;
	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					MAX_LEGACY_IRQS,
					&ks_dw_pcie_legacy_irq_domain_ops,
					NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
new file mode 100644
index 000000000000..1b893bc8b842
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.c
@@ -0,0 +1,415 @@
1/*
2 * PCIe host controller driver for Texas Instruments Keystone SoCs
3 *
4 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
5 * http://www.ti.com
6 *
7 * Author: Murali Karicheri <m-karicheri2@ti.com>
8 * Implementation based on pci-exynos.c and pcie-designware.c
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/irqchip/chained_irq.h>
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/irqdomain.h>
19#include <linux/module.h>
20#include <linux/msi.h>
21#include <linux/of_irq.h>
22#include <linux/of.h>
23#include <linux/of_pci.h>
24#include <linux/platform_device.h>
25#include <linux/phy/phy.h>
26#include <linux/resource.h>
27#include <linux/signal.h>
28
29#include "pcie-designware.h"
30#include "pci-keystone.h"
31
32#define DRIVER_NAME "keystone-pcie"
33
34/* driver specific constants */
35#define MAX_MSI_HOST_IRQS 8
36#define MAX_LEGACY_HOST_IRQS 4
37
38/* DEV_STAT_CTRL */
39#define PCIE_CAP_BASE 0x70
40
41/* PCIE controller device IDs */
42#define PCIE_RC_K2HK 0xb008
43#define PCIE_RC_K2E 0xb009
44#define PCIE_RC_K2L 0xb00a
45
46#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
47
/*
 * quirk_limit_mrrs() - clamp Max Read Request Size on Keystone hierarchies
 * @dev: PCI device being enabled
 *
 * Walks up to the host bridge and, if it is one of the known Keystone RC
 * device IDs, forces the device's MRRS down to 256 bytes (the controller's
 * hardware limit). Registered as an ENABLE-stage fixup for all devices.
 */
static void quirk_limit_mrrs(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge = bus->self;
	/* Keystone root-complex IDs this quirk applies to */
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	/* the RC itself needs no clamping */
	if (pci_is_root_bus(bus))
		return;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (bridge) {
		/*
		 * Keystone PCI controller has a h/w limitation of
		 * 256 bytes maximum read request size. It can't handle
		 * anything higher than this. So force this limit on
		 * all downstream devices.
		 */
		if (pci_match_id(rc_pci_devids, bridge)) {
			if (pcie_get_readrq(dev) > 256) {
				dev_info(&dev->dev, "limiting MRRS to 256\n");
				pcie_set_readrq(dev, 256);
			}
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
87
/*
 * ks_pcie_establish_link() - bring up the PCIe link
 * @ks_pcie: Keystone PCIe driver instance
 *
 * Sets up the RC, then repeatedly re-initiates link training (up to 200
 * attempts, sleeping between polls) until dw_pcie_link_up() reports L0.
 * Returns 0 on success (or if the link was already up), -EINVAL if the
 * link never comes up.
 */
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	int count = 200;

	dw_pcie_setup_rc(pp);

	if (dw_pcie_link_up(pp)) {
		dev_err(pp->dev, "Link already up\n");
		return 0;
	}

	ks_dw_pcie_initiate_link_train(ks_pcie);
	/* check if the link is up or not */
	while (!dw_pcie_link_up(pp)) {
		usleep_range(100, 1000);
		if (--count) {
			/* retry training until the attempt budget runs out */
			ks_dw_pcie_initiate_link_train(ks_pcie);
			continue;
		}
		dev_err(pp->dev, "phy link never came up\n");
		return -EINVAL;
	}

	return 0;
}
114
/*
 * ks_pcie_msi_irq_handler() - chained handler for one MSI host interrupt
 * @irq:  host IRQ number (one of msi_host_irqs[])
 * @desc: irq descriptor for @irq
 *
 * Translates the host IRQ back to its MSI bank index and demultiplexes the
 * pending MSI vectors in that bank.
 */
static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	/* bank index = position of this irq within the contiguous host irqs */
	u32 offset = irq - ks_pcie->msi_host_irqs[0];
	struct pcie_port *pp = &ks_pcie->pp;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
	chained_irq_exit(chip, desc);
}
133
/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @irq: IRQ line for legacy interrupts
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct pcie_port *pp = &ks_pcie->pp;
	/* INTx index = position of this irq within the contiguous host irqs */
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}
160
161static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
162 char *controller, int *num_irqs)
163{
164 int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
165 struct device *dev = ks_pcie->pp.dev;
166 struct device_node *np_pcie = dev->of_node, **np_temp;
167
168 if (!strcmp(controller, "msi-interrupt-controller"))
169 legacy = 0;
170
171 if (legacy) {
172 np_temp = &ks_pcie->legacy_intc_np;
173 max_host_irqs = MAX_LEGACY_HOST_IRQS;
174 host_irqs = &ks_pcie->legacy_host_irqs[0];
175 } else {
176 np_temp = &ks_pcie->msi_intc_np;
177 max_host_irqs = MAX_MSI_HOST_IRQS;
178 host_irqs = &ks_pcie->msi_host_irqs[0];
179 }
180
181 /* interrupt controller is in a child node */
182 *np_temp = of_find_node_by_name(np_pcie, controller);
183 if (!(*np_temp)) {
184 dev_err(dev, "Node for %s is absent\n", controller);
185 goto out;
186 }
187 temp = of_irq_count(*np_temp);
188 if (!temp)
189 goto out;
190 if (temp > max_host_irqs)
191 dev_warn(dev, "Too many %s interrupts defined %u\n",
192 (legacy ? "legacy" : "MSI"), temp);
193
194 /*
195 * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
196 * 7 (MSI)
197 */
198 for (temp = 0; temp < max_host_irqs; temp++) {
199 host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
200 if (host_irqs[temp] < 0)
201 break;
202 }
203 if (temp) {
204 *num_irqs = temp;
205 ret = 0;
206 }
207out:
208 return ret;
209}
210
/*
 * ks_pcie_setup_interrupts() - install chained handlers for host IRQs
 * @ks_pcie: Keystone PCIe driver instance
 *
 * Attaches the legacy INTx chained handler to each legacy host IRQ and
 * enables the INTx lines in the controller; when MSI is configured, also
 * attaches the MSI chained handler to each MSI host IRQ.
 */
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
	int i;

	/* Legacy IRQ */
	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
					ks_pcie_legacy_irq_handler);
	}
	ks_dw_pcie_enable_legacy_irqs(ks_pcie);

	/* MSI IRQ */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
			irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
						ks_pcie_msi_irq_handler);
			irq_set_handler_data(ks_pcie->msi_host_irqs[i],
					     ks_pcie);
		}
	}
}
233
/*
 * When a PCI device does not exist during config cycles, keystone host gets a
 * bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of faults.
 */
static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
				struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	/*
	 * If the faulting instruction matches this load encoding (ARM
	 * single-data-transfer load -- assumption, TODO verify against the
	 * ARM ARM), fake an all-ones result in the destination register and
	 * skip the instruction.
	 */
	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;	/* Rd field of the load */

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;		/* skip the faulting insn */
	}

	/* always report the abort as handled */
	return 0;
}
253
/*
 * ks_pcie_host_init() - DesignWare .host_init callback
 * @pp: DesignWare pcie_port
 *
 * Brings up the link, programs the application registers and host IRQ
 * handlers, and applies Keystone-specific config-space fixups.
 */
static void __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u32 val;

	ks_pcie_establish_link(ks_pcie);
	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	/* mark the bridge I/O base/limit as 32-bit capable */
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pp->dbi_base + PCI_IO_BASE);

	/* update the Device ID (read earlier from hardware in probe) */
	writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);

	/* update the DEV_STAT_CTRL to publish right mrrs */
	val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	/* set the mrrs to 256 bytes */
	val |= BIT(12);
	writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);

	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
}
282
/* DesignWare host ops wired to the Keystone-specific implementations */
static struct pcie_host_ops keystone_pcie_host_ops = {
	.rd_other_conf = ks_dw_pcie_rd_other_conf,
	.wr_other_conf = ks_dw_pcie_wr_other_conf,
	.link_up = ks_dw_pcie_link_up,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_dw_pcie_msi_set_irq,
	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
	.get_msi_addr = ks_dw_pcie_get_msi_addr,
	.msi_host_init = ks_dw_pcie_msi_host_init,
	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
294
/*
 * ks_add_pcie_port() - gather host IRQ info and register the PCIe port
 * @ks_pcie: Keystone PCIe driver instance
 * @pdev:    owning platform device
 *
 * Reads the legacy (and, when enabled, MSI) interrupt controller info from
 * DT, then hands off to ks_dw_pcie_host_init(). Returns 0 on success or a
 * negative errno.
 */
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
			 struct platform_device *pdev)
{
	struct pcie_port *pp = &ks_pcie->pp;
	int ret;

	ret = ks_pcie_get_irq_controller_info(ks_pcie,
					"legacy-interrupt-controller",
					&ks_pcie->num_legacy_host_irqs);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = ks_pcie_get_irq_controller_info(ks_pcie,
						"msi-interrupt-controller",
						&ks_pcie->num_msi_host_irqs);
		if (ret)
			return ret;
	}

	pp->root_bus_nr = -1;
	pp->ops = &keystone_pcie_host_ops;
	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize host\n");
		return ret;
	}

	return ret;
}
325
/* DT match table: binds this driver to "ti,keystone-pcie" PCI nodes */
static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};
MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
334
335static int __exit ks_pcie_remove(struct platform_device *pdev)
336{
337 struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
338
339 clk_disable_unprepare(ks_pcie->clk);
340
341 return 0;
342}
343
/*
 * ks_pcie_probe() - platform driver probe
 * @pdev: platform device for the PCIe controller
 *
 * Allocates driver state, optionally initializes the SerDes PHY, reads the
 * hardware PCI Device ID from the third memory resource, enables the
 * functional clock, and registers the PCIe port. Returns 0 on success or a
 * negative errno (undoing the clock enable on port-setup failure).
 */
static int __init ks_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct keystone_pcie *ks_pcie;
	struct pcie_port *pp;
	struct resource *res;
	void __iomem *reg_p;
	struct phy *phy;
	int ret = 0;

	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
				GFP_KERNEL);
	if (!ks_pcie) {
		dev_err(dev, "no memory for keystone pcie\n");
		return -ENOMEM;
	}
	pp = &ks_pcie->pp;

	/* initialize SerDes Phy if present */
	phy = devm_phy_get(dev, "pcie-phy");
	if (!IS_ERR_OR_NULL(phy)) {
		ret = phy_init(phy);
		if (ret < 0)
			return ret;
	}

	/* index 2 is to read PCI DEVICE_ID */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	reg_p = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg_p))
		return PTR_ERR(reg_p);
	/* device ID lives in the upper 16 bits of the register */
	ks_pcie->device_id = readl(reg_p) >> 16;
	/* mapping was only needed for this one read; release it now */
	devm_iounmap(dev, reg_p);
	devm_release_mem_region(dev, res->start, resource_size(res));

	pp->dev = dev;
	platform_set_drvdata(pdev, ks_pcie);
	ks_pcie->clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(ks_pcie->clk)) {
		dev_err(dev, "Failed to get pcie rc clock\n");
		return PTR_ERR(ks_pcie->clk);
	}
	ret = clk_prepare_enable(ks_pcie->clk);
	if (ret)
		return ret;

	ret = ks_add_pcie_port(ks_pcie, pdev);
	if (ret < 0)
		goto fail_clk;

	return 0;
fail_clk:
	clk_disable_unprepare(ks_pcie->clk);

	return ret;
}
400
/*
 * Platform driver registration. __refdata is needed because probe is
 * __init while the driver struct itself persists.
 */
static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};

module_platform_driver(ks_pcie_driver);
412
413MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
414MODULE_DESCRIPTION("Keystone PCIe host controller driver");
415MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
new file mode 100644
index 000000000000..1fc1fceede9e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.h
@@ -0,0 +1,58 @@
1/*
2 * Keystone PCI Controller's common includes
3 *
4 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
5 * http://www.ti.com
6 *
7 * Author: Murali Karicheri <m-karicheri2@ti.com>
8 *
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#define MAX_LEGACY_IRQS 4
16#define MAX_MSI_HOST_IRQS 8
17#define MAX_LEGACY_HOST_IRQS 4
18
/* Per-controller state for the Keystone PCIe host driver */
struct keystone_pcie {
	struct clk		*clk;		/* functional clock "pcie" */
	struct pcie_port	pp;		/* embedded DesignWare port */
	/* PCI Device ID */
	u32			device_id;
	int			num_legacy_host_irqs;
	int			legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
	struct device_node	*legacy_intc_np;	/* legacy intc DT node */

	int			num_msi_host_irqs;
	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
	struct device_node	*msi_intc_np;		/* MSI intc DT node */
	struct irq_domain	*legacy_irq_domain;	/* INTx 0..3 domain */

	/* Application register space */
	void __iomem		*va_app_base;
	struct resource		app;		/* physical app reg resource */
};
37
38/* Keystone DW specific MSI controller APIs/definitions */
39void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
40u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
41
42/* Keystone specific PCI controller APIs */
43void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
44void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
45int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
46 struct device_node *msi_intc_np);
47int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
48 unsigned int devfn, int where, int size, u32 val);
49int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
50 unsigned int devfn, int where, int size, u32 *val);
51void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
52int ks_dw_pcie_link_up(struct pcie_port *pp);
53void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
54void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
55void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
56void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
57int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
58 struct msi_chip *chip);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index a8c6f1a92e0f..b1315e197ffb 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -873,7 +873,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
873 rangesz = pna + na + ns; 873 rangesz = pna + na + ns;
874 nranges = rlen / sizeof(__be32) / rangesz; 874 nranges = rlen / sizeof(__be32) / rangesz;
875 875
876 for (i = 0; i < nranges; i++) { 876 for (i = 0; i < nranges; i++, range += rangesz) {
877 u32 flags = of_read_number(range, 1); 877 u32 flags = of_read_number(range, 1);
878 u32 slot = of_read_number(range + 1, 1); 878 u32 slot = of_read_number(range + 1, 1);
879 u64 cpuaddr = of_read_number(range + na, pna); 879 u64 cpuaddr = of_read_number(range + na, pna);
@@ -883,14 +883,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
883 rtype = IORESOURCE_IO; 883 rtype = IORESOURCE_IO;
884 else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) 884 else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
885 rtype = IORESOURCE_MEM; 885 rtype = IORESOURCE_MEM;
886 else
887 continue;
886 888
887 if (slot == PCI_SLOT(devfn) && type == rtype) { 889 if (slot == PCI_SLOT(devfn) && type == rtype) {
888 *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); 890 *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
889 *attr = DT_CPUADDR_TO_ATTR(cpuaddr); 891 *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
890 return 0; 892 return 0;
891 } 893 }
892
893 range += rangesz;
894 } 894 }
895 895
896 return -ENOENT; 896 return -ENOENT;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0fb0fdb223d5..3d43874319be 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -38,6 +38,7 @@
38#include <linux/of_pci.h> 38#include <linux/of_pci.h>
39#include <linux/of_platform.h> 39#include <linux/of_platform.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/phy/phy.h>
41#include <linux/platform_device.h> 42#include <linux/platform_device.h>
42#include <linux/reset.h> 43#include <linux/reset.h>
43#include <linux/sizes.h> 44#include <linux/sizes.h>
@@ -115,13 +116,20 @@
115 116
116#define AFI_INTR_CODE 0xb8 117#define AFI_INTR_CODE 0xb8
117#define AFI_INTR_CODE_MASK 0xf 118#define AFI_INTR_CODE_MASK 0xf
118#define AFI_INTR_AXI_SLAVE_ERROR 1 119#define AFI_INTR_INI_SLAVE_ERROR 1
119#define AFI_INTR_AXI_DECODE_ERROR 2 120#define AFI_INTR_INI_DECODE_ERROR 2
120#define AFI_INTR_TARGET_ABORT 3 121#define AFI_INTR_TARGET_ABORT 3
121#define AFI_INTR_MASTER_ABORT 4 122#define AFI_INTR_MASTER_ABORT 4
122#define AFI_INTR_INVALID_WRITE 5 123#define AFI_INTR_INVALID_WRITE 5
123#define AFI_INTR_LEGACY 6 124#define AFI_INTR_LEGACY 6
124#define AFI_INTR_FPCI_DECODE_ERROR 7 125#define AFI_INTR_FPCI_DECODE_ERROR 7
126#define AFI_INTR_AXI_DECODE_ERROR 8
127#define AFI_INTR_FPCI_TIMEOUT 9
128#define AFI_INTR_PE_PRSNT_SENSE 10
129#define AFI_INTR_PE_CLKREQ_SENSE 11
130#define AFI_INTR_CLKCLAMP_SENSE 12
131#define AFI_INTR_RDY4PD_SENSE 13
132#define AFI_INTR_P2P_ERROR 14
125 133
126#define AFI_INTR_SIGNATURE 0xbc 134#define AFI_INTR_SIGNATURE 0xbc
127#define AFI_UPPER_FPCI_ADDRESS 0xc0 135#define AFI_UPPER_FPCI_ADDRESS 0xc0
@@ -152,8 +160,10 @@
152#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) 160#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
153#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) 161#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
154#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) 162#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
163#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
155#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) 164#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
156#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) 165#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
166#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
157#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) 167#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
158 168
159#define AFI_FUSE 0x104 169#define AFI_FUSE 0x104
@@ -165,12 +175,21 @@
165#define AFI_PEX_CTRL_RST (1 << 0) 175#define AFI_PEX_CTRL_RST (1 << 0)
166#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) 176#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
167#define AFI_PEX_CTRL_REFCLK_EN (1 << 3) 177#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
178#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
179
180#define AFI_PLLE_CONTROL 0x160
181#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
182#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
168 183
169#define AFI_PEXBIAS_CTRL_0 0x168 184#define AFI_PEXBIAS_CTRL_0 0x168
170 185
171#define RP_VEND_XP 0x00000F00 186#define RP_VEND_XP 0x00000F00
172#define RP_VEND_XP_DL_UP (1 << 30) 187#define RP_VEND_XP_DL_UP (1 << 30)
173 188
189#define RP_PRIV_MISC 0x00000FE0
190#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
191#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
192
174#define RP_LINK_CONTROL_STATUS 0x00000090 193#define RP_LINK_CONTROL_STATUS 0x00000090
175#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 194#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
176#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 195#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
@@ -197,6 +216,7 @@
197 216
198#define PADS_REFCLK_CFG0 0x000000C8 217#define PADS_REFCLK_CFG0 0x000000C8
199#define PADS_REFCLK_CFG1 0x000000CC 218#define PADS_REFCLK_CFG1 0x000000CC
219#define PADS_REFCLK_BIAS 0x000000D0
200 220
201/* 221/*
202 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit 222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -236,6 +256,7 @@ struct tegra_pcie_soc_data {
236 bool has_pex_bias_ctrl; 256 bool has_pex_bias_ctrl;
237 bool has_intr_prsnt_sense; 257 bool has_intr_prsnt_sense;
238 bool has_cml_clk; 258 bool has_cml_clk;
259 bool has_gen2;
239}; 260};
240 261
241static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) 262static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
@@ -253,6 +274,7 @@ struct tegra_pcie {
253 struct list_head buses; 274 struct list_head buses;
254 struct resource *cs; 275 struct resource *cs;
255 276
277 struct resource all;
256 struct resource io; 278 struct resource io;
257 struct resource mem; 279 struct resource mem;
258 struct resource prefetch; 280 struct resource prefetch;
@@ -267,6 +289,8 @@ struct tegra_pcie {
267 struct reset_control *afi_rst; 289 struct reset_control *afi_rst;
268 struct reset_control *pcie_xrst; 290 struct reset_control *pcie_xrst;
269 291
292 struct phy *phy;
293
270 struct tegra_msi msi; 294 struct tegra_msi msi;
271 295
272 struct list_head ports; 296 struct list_head ports;
@@ -382,7 +406,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
382 for (i = 0; i < 16; i++) { 406 for (i = 0; i < 16; i++) {
383 unsigned long virt = (unsigned long)bus->area->addr + 407 unsigned long virt = (unsigned long)bus->area->addr +
384 i * SZ_64K; 408 i * SZ_64K;
385 phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K; 409 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
386 410
387 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot); 411 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
388 if (err < 0) { 412 if (err < 0) {
@@ -561,6 +585,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
561 if (soc->has_pex_clkreq_en) 585 if (soc->has_pex_clkreq_en)
562 value |= AFI_PEX_CTRL_CLKREQ_EN; 586 value |= AFI_PEX_CTRL_CLKREQ_EN;
563 587
588 value |= AFI_PEX_CTRL_OVERRIDE_EN;
589
564 afi_writel(port->pcie, value, ctrl); 590 afi_writel(port->pcie, value, ctrl);
565 591
566 tegra_pcie_port_reset(port); 592 tegra_pcie_port_reset(port);
@@ -568,6 +594,7 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
568 594
569static void tegra_pcie_port_disable(struct tegra_pcie_port *port) 595static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
570{ 596{
597 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
571 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); 598 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
572 unsigned long value; 599 unsigned long value;
573 600
@@ -578,6 +605,10 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
578 605
579 /* disable reference clock */ 606 /* disable reference clock */
580 value = afi_readl(port->pcie, ctrl); 607 value = afi_readl(port->pcie, ctrl);
608
609 if (soc->has_pex_clkreq_en)
610 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
611
581 value &= ~AFI_PEX_CTRL_REFCLK_EN; 612 value &= ~AFI_PEX_CTRL_REFCLK_EN;
582 afi_writel(port->pcie, value, ctrl); 613 afi_writel(port->pcie, value, ctrl);
583} 614}
@@ -626,13 +657,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
626static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) 657static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
627{ 658{
628 struct tegra_pcie *pcie = sys_to_pcie(sys); 659 struct tegra_pcie *pcie = sys_to_pcie(sys);
660 int err;
661 phys_addr_t io_start;
662
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664 if (err < 0)
665 return err;
666
667 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
668 if (err)
669 return err;
670
671 io_start = pci_pio_to_address(pcie->io.start);
629 672
630 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); 673 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
631 pci_add_resource_offset(&sys->resources, &pcie->prefetch, 674 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
632 sys->mem_offset); 675 sys->mem_offset);
633 pci_add_resource(&sys->resources, &pcie->busn); 676 pci_add_resource(&sys->resources, &pcie->busn);
634 677
635 pci_ioremap_io(nr * SZ_64K, pcie->io.start); 678 pci_ioremap_io(nr * SZ_64K, io_start);
636 679
637 return 1; 680 return 1;
638} 681}
@@ -684,9 +727,15 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
684 "Target abort", 727 "Target abort",
685 "Master abort", 728 "Master abort",
686 "Invalid write", 729 "Invalid write",
730 "Legacy interrupt",
687 "Response decoding error", 731 "Response decoding error",
688 "AXI response decoding error", 732 "AXI response decoding error",
689 "Transaction timeout", 733 "Transaction timeout",
734 "Slot present pin change",
735 "Slot clock request change",
736 "TMS clock ramp change",
737 "TMS ready for power down",
738 "Peer2Peer error",
690 }; 739 };
691 struct tegra_pcie *pcie = arg; 740 struct tegra_pcie *pcie = arg;
692 u32 code, signature; 741 u32 code, signature;
@@ -737,6 +786,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
737static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) 786static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
738{ 787{
739 u32 fpci_bar, size, axi_address; 788 u32 fpci_bar, size, axi_address;
789 phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
740 790
741 /* Bar 0: type 1 extended configuration space */ 791 /* Bar 0: type 1 extended configuration space */
742 fpci_bar = 0xfe100000; 792 fpci_bar = 0xfe100000;
@@ -749,7 +799,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
749 /* Bar 1: downstream IO bar */ 799 /* Bar 1: downstream IO bar */
750 fpci_bar = 0xfdfc0000; 800 fpci_bar = 0xfdfc0000;
751 size = resource_size(&pcie->io); 801 size = resource_size(&pcie->io);
752 axi_address = pcie->io.start; 802 axi_address = io_start;
753 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); 803 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
754 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); 804 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
755 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); 805 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -792,30 +842,27 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
792 afi_writel(pcie, 0, AFI_MSI_BAR_SZ); 842 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
793} 843}
794 844
795static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) 845static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
796{ 846{
797 const struct tegra_pcie_soc_data *soc = pcie->soc_data; 847 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
798 struct tegra_pcie_port *port; 848 u32 value;
799 unsigned int timeout;
800 unsigned long value;
801 849
802 /* power down PCIe slot clock bias pad */ 850 timeout = jiffies + msecs_to_jiffies(timeout);
803 if (soc->has_pex_bias_ctrl)
804 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
805 851
806 /* configure mode and disable all ports */ 852 while (time_before(jiffies, timeout)) {
807 value = afi_readl(pcie, AFI_PCIE_CONFIG); 853 value = pads_readl(pcie, soc->pads_pll_ctl);
808 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; 854 if (value & PADS_PLL_CTL_LOCKDET)
809 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; 855 return 0;
810 856 }
811 list_for_each_entry(port, &pcie->ports, list)
812 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
813 857
814 afi_writel(pcie, value, AFI_PCIE_CONFIG); 858 return -ETIMEDOUT;
859}
815 860
816 value = afi_readl(pcie, AFI_FUSE); 861static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
817 value |= AFI_FUSE_PCIE_T0_GEN2_DIS; 862{
818 afi_writel(pcie, value, AFI_FUSE); 863 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
864 u32 value;
865 int err;
819 866
820 /* initialize internal PHY, enable up to 16 PCIE lanes */ 867 /* initialize internal PHY, enable up to 16 PCIE lanes */
821 pads_writel(pcie, 0x0, PADS_CTL_SEL); 868 pads_writel(pcie, 0x0, PADS_CTL_SEL);
@@ -834,6 +881,13 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
834 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel; 881 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
835 pads_writel(pcie, value, soc->pads_pll_ctl); 882 pads_writel(pcie, value, soc->pads_pll_ctl);
836 883
884 /* reset PLL */
885 value = pads_readl(pcie, soc->pads_pll_ctl);
886 value &= ~PADS_PLL_CTL_RST_B4SM;
887 pads_writel(pcie, value, soc->pads_pll_ctl);
888
889 usleep_range(20, 100);
890
837 /* take PLL out of reset */ 891 /* take PLL out of reset */
838 value = pads_readl(pcie, soc->pads_pll_ctl); 892 value = pads_readl(pcie, soc->pads_pll_ctl);
839 value |= PADS_PLL_CTL_RST_B4SM; 893 value |= PADS_PLL_CTL_RST_B4SM;
@@ -846,15 +900,11 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
846 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1); 900 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
847 901
848 /* wait for the PLL to lock */ 902 /* wait for the PLL to lock */
849 timeout = 300; 903 err = tegra_pcie_pll_wait(pcie, 500);
850 do { 904 if (err < 0) {
851 value = pads_readl(pcie, soc->pads_pll_ctl); 905 dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
852 usleep_range(1000, 2000); 906 return err;
853 if (--timeout == 0) { 907 }
854 pr_err("Tegra PCIe error: timeout waiting for PLL\n");
855 return -EBUSY;
856 }
857 } while (!(value & PADS_PLL_CTL_LOCKDET));
858 908
859 /* turn off IDDQ override */ 909 /* turn off IDDQ override */
860 value = pads_readl(pcie, PADS_CTL); 910 value = pads_readl(pcie, PADS_CTL);
@@ -866,6 +916,58 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
866 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L; 916 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
867 pads_writel(pcie, value, PADS_CTL); 917 pads_writel(pcie, value, PADS_CTL);
868 918
919 return 0;
920}
921
922static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
923{
924 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
925 struct tegra_pcie_port *port;
926 unsigned long value;
927 int err;
928
929 /* enable PLL power down */
930 if (pcie->phy) {
931 value = afi_readl(pcie, AFI_PLLE_CONTROL);
932 value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
933 value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
934 afi_writel(pcie, value, AFI_PLLE_CONTROL);
935 }
936
937 /* power down PCIe slot clock bias pad */
938 if (soc->has_pex_bias_ctrl)
939 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
940
941 /* configure mode and disable all ports */
942 value = afi_readl(pcie, AFI_PCIE_CONFIG);
943 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
944 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
945
946 list_for_each_entry(port, &pcie->ports, list)
947 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
948
949 afi_writel(pcie, value, AFI_PCIE_CONFIG);
950
951 if (soc->has_gen2) {
952 value = afi_readl(pcie, AFI_FUSE);
953 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
954 afi_writel(pcie, value, AFI_FUSE);
955 } else {
956 value = afi_readl(pcie, AFI_FUSE);
957 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
958 afi_writel(pcie, value, AFI_FUSE);
959 }
960
961 if (!pcie->phy)
962 err = tegra_pcie_phy_enable(pcie);
963 else
964 err = phy_power_on(pcie->phy);
965
966 if (err < 0) {
967 dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
968 return err;
969 }
970
869 /* take the PCIe interface module out of reset */ 971 /* take the PCIe interface module out of reset */
870 reset_control_deassert(pcie->pcie_xrst); 972 reset_control_deassert(pcie->pcie_xrst);
871 973
@@ -899,6 +1001,10 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
899 1001
900 /* TODO: disable and unprepare clocks? */ 1002 /* TODO: disable and unprepare clocks? */
901 1003
1004 err = phy_power_off(pcie->phy);
1005 if (err < 0)
1006 dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
1007
902 reset_control_assert(pcie->pcie_xrst); 1008 reset_control_assert(pcie->pcie_xrst);
903 reset_control_assert(pcie->afi_rst); 1009 reset_control_assert(pcie->afi_rst);
904 reset_control_assert(pcie->pex_rst); 1010 reset_control_assert(pcie->pex_rst);
@@ -1020,6 +1126,19 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1020 return err; 1126 return err;
1021 } 1127 }
1022 1128
1129 pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
1130 if (IS_ERR(pcie->phy)) {
1131 err = PTR_ERR(pcie->phy);
1132 dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
1133 return err;
1134 }
1135
1136 err = phy_init(pcie->phy);
1137 if (err < 0) {
1138 dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
1139 return err;
1140 }
1141
1023 err = tegra_pcie_power_on(pcie); 1142 err = tegra_pcie_power_on(pcie);
1024 if (err) { 1143 if (err) {
1025 dev_err(&pdev->dev, "failed to power up: %d\n", err); 1144 dev_err(&pdev->dev, "failed to power up: %d\n", err);
@@ -1078,10 +1197,17 @@ poweroff:
1078 1197
1079static int tegra_pcie_put_resources(struct tegra_pcie *pcie) 1198static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1080{ 1199{
1200 int err;
1201
1081 if (pcie->irq > 0) 1202 if (pcie->irq > 0)
1082 free_irq(pcie->irq, pcie); 1203 free_irq(pcie->irq, pcie);
1083 1204
1084 tegra_pcie_power_off(pcie); 1205 tegra_pcie_power_off(pcie);
1206
1207 err = phy_exit(pcie->phy);
1208 if (err < 0)
1209 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1210
1085 return 0; 1211 return 0;
1086} 1212}
1087 1213
@@ -1170,8 +1296,10 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1170 return hwirq; 1296 return hwirq;
1171 1297
1172 irq = irq_create_mapping(msi->domain, hwirq); 1298 irq = irq_create_mapping(msi->domain, hwirq);
1173 if (!irq) 1299 if (!irq) {
1300 tegra_msi_free(msi, hwirq);
1174 return -EINVAL; 1301 return -EINVAL;
1302 }
1175 1303
1176 irq_set_msi_desc(irq, desc); 1304 irq_set_msi_desc(irq, desc);
1177 1305
@@ -1189,8 +1317,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1189{ 1317{
1190 struct tegra_msi *msi = to_tegra_msi(chip); 1318 struct tegra_msi *msi = to_tegra_msi(chip);
1191 struct irq_data *d = irq_get_irq_data(irq); 1319 struct irq_data *d = irq_get_irq_data(irq);
1320 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1192 1321
1193 tegra_msi_free(msi, d->hwirq); 1322 irq_dispose_mapping(irq);
1323 tegra_msi_free(msi, hwirq);
1194} 1324}
1195 1325
1196static struct irq_chip tegra_msi_irq_chip = { 1326static struct irq_chip tegra_msi_irq_chip = {
@@ -1327,7 +1457,19 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1327{ 1457{
1328 struct device_node *np = pcie->dev->of_node; 1458 struct device_node *np = pcie->dev->of_node;
1329 1459
1330 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { 1460 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1461 switch (lanes) {
1462 case 0x0000104:
1463 dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1464 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1465 return 0;
1466
1467 case 0x0000102:
1468 dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1469 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1470 return 0;
1471 }
1472 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1331 switch (lanes) { 1473 switch (lanes) {
1332 case 0x00000204: 1474 case 0x00000204:
1333 dev_info(pcie->dev, "4x1, 2x1 configuration\n"); 1475 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
@@ -1435,7 +1577,23 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1435 struct device_node *np = pcie->dev->of_node; 1577 struct device_node *np = pcie->dev->of_node;
1436 unsigned int i = 0; 1578 unsigned int i = 0;
1437 1579
1438 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { 1580 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1581 pcie->num_supplies = 7;
1582
1583 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1584 sizeof(*pcie->supplies),
1585 GFP_KERNEL);
1586 if (!pcie->supplies)
1587 return -ENOMEM;
1588
1589 pcie->supplies[i++].supply = "avddio-pex";
1590 pcie->supplies[i++].supply = "dvddio-pex";
1591 pcie->supplies[i++].supply = "avdd-pex-pll";
1592 pcie->supplies[i++].supply = "hvdd-pex";
1593 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1594 pcie->supplies[i++].supply = "vddio-pex-ctl";
1595 pcie->supplies[i++].supply = "avdd-pll-erefe";
1596 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1439 bool need_pexa = false, need_pexb = false; 1597 bool need_pexa = false, need_pexb = false;
1440 1598
1441 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ 1599 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
@@ -1514,32 +1672,50 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1514 struct resource res; 1672 struct resource res;
1515 int err; 1673 int err;
1516 1674
1675 memset(&pcie->all, 0, sizeof(pcie->all));
1676 pcie->all.flags = IORESOURCE_MEM;
1677 pcie->all.name = np->full_name;
1678 pcie->all.start = ~0;
1679 pcie->all.end = 0;
1680
1517 if (of_pci_range_parser_init(&parser, np)) { 1681 if (of_pci_range_parser_init(&parser, np)) {
1518 dev_err(pcie->dev, "missing \"ranges\" property\n"); 1682 dev_err(pcie->dev, "missing \"ranges\" property\n");
1519 return -EINVAL; 1683 return -EINVAL;
1520 } 1684 }
1521 1685
1522 for_each_of_pci_range(&parser, &range) { 1686 for_each_of_pci_range(&parser, &range) {
1523 of_pci_range_to_resource(&range, np, &res); 1687 err = of_pci_range_to_resource(&range, np, &res);
1688 if (err < 0)
1689 return err;
1524 1690
1525 switch (res.flags & IORESOURCE_TYPE_BITS) { 1691 switch (res.flags & IORESOURCE_TYPE_BITS) {
1526 case IORESOURCE_IO: 1692 case IORESOURCE_IO:
1527 memcpy(&pcie->io, &res, sizeof(res)); 1693 memcpy(&pcie->io, &res, sizeof(res));
1528 pcie->io.name = "I/O"; 1694 pcie->io.name = np->full_name;
1529 break; 1695 break;
1530 1696
1531 case IORESOURCE_MEM: 1697 case IORESOURCE_MEM:
1532 if (res.flags & IORESOURCE_PREFETCH) { 1698 if (res.flags & IORESOURCE_PREFETCH) {
1533 memcpy(&pcie->prefetch, &res, sizeof(res)); 1699 memcpy(&pcie->prefetch, &res, sizeof(res));
1534 pcie->prefetch.name = "PREFETCH"; 1700 pcie->prefetch.name = "prefetchable";
1535 } else { 1701 } else {
1536 memcpy(&pcie->mem, &res, sizeof(res)); 1702 memcpy(&pcie->mem, &res, sizeof(res));
1537 pcie->mem.name = "MEM"; 1703 pcie->mem.name = "non-prefetchable";
1538 } 1704 }
1539 break; 1705 break;
1540 } 1706 }
1707
1708 if (res.start <= pcie->all.start)
1709 pcie->all.start = res.start;
1710
1711 if (res.end >= pcie->all.end)
1712 pcie->all.end = res.end;
1541 } 1713 }
1542 1714
1715 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1716 if (err < 0)
1717 return err;
1718
1543 err = of_pci_parse_bus_range(np, &pcie->busn); 1719 err = of_pci_parse_bus_range(np, &pcie->busn);
1544 if (err < 0) { 1720 if (err < 0) {
1545 dev_err(pcie->dev, "failed to parse ranges property: %d\n", 1721 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -1641,6 +1817,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1641 unsigned int retries = 3; 1817 unsigned int retries = 3;
1642 unsigned long value; 1818 unsigned long value;
1643 1819
1820 /* override presence detection */
1821 value = readl(port->base + RP_PRIV_MISC);
1822 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1823 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1824 writel(value, port->base + RP_PRIV_MISC);
1825
1644 do { 1826 do {
1645 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT; 1827 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1646 1828
@@ -1721,6 +1903,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1721 .has_pex_bias_ctrl = false, 1903 .has_pex_bias_ctrl = false,
1722 .has_intr_prsnt_sense = false, 1904 .has_intr_prsnt_sense = false,
1723 .has_cml_clk = false, 1905 .has_cml_clk = false,
1906 .has_gen2 = false,
1724}; 1907};
1725 1908
1726static const struct tegra_pcie_soc_data tegra30_pcie_data = { 1909static const struct tegra_pcie_soc_data tegra30_pcie_data = {
@@ -1732,9 +1915,23 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1732 .has_pex_bias_ctrl = true, 1915 .has_pex_bias_ctrl = true,
1733 .has_intr_prsnt_sense = true, 1916 .has_intr_prsnt_sense = true,
1734 .has_cml_clk = true, 1917 .has_cml_clk = true,
1918 .has_gen2 = false,
1919};
1920
1921static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1922 .num_ports = 2,
1923 .msi_base_shift = 8,
1924 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1925 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1926 .has_pex_clkreq_en = true,
1927 .has_pex_bias_ctrl = true,
1928 .has_intr_prsnt_sense = true,
1929 .has_cml_clk = true,
1930 .has_gen2 = true,
1735}; 1931};
1736 1932
1737static const struct of_device_id tegra_pcie_of_match[] = { 1933static const struct of_device_id tegra_pcie_of_match[] = {
1934 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
1738 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data }, 1935 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1739 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data }, 1936 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1740 { }, 1937 { },
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
new file mode 100644
index 000000000000..9ecabfa8c634
--- /dev/null
+++ b/drivers/pci/host/pci-xgene.c
@@ -0,0 +1,659 @@
1/**
2 * APM X-Gene PCIe Driver
3 *
4 * Copyright (c) 2014 Applied Micro Circuits Corporation.
5 *
6 * Author: Tanmay Inamdar <tinamdar@apm.com>.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19#include <linux/clk-private.h>
20#include <linux/delay.h>
21#include <linux/io.h>
22#include <linux/jiffies.h>
23#include <linux/memblock.h>
24#include <linux/module.h>
25#include <linux/of.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_pci.h>
29#include <linux/pci.h>
30#include <linux/platform_device.h>
31#include <linux/slab.h>
32
/* Controller CSR offsets, relative to the "csr" register window. */
#define PCIECORE_CTLANDSTATUS		0x50	/* core control/status; bit 8 = link up */
/* Inbound translation: PIM = PCIe inbound map, IBAR/IRMSK = base/mask pairs */
#define PIM1_1L				0x80
#define IBAR2				0x98
#define IR2MSK				0x9c
#define PIM2_1L				0xa0
#define IBAR3L				0xb4
#define IR3MSKL				0xbc
#define PIM3_1L				0xc4
/* Outbound translation regions */
#define OMR1BARL			0x100
#define OMR2BARL			0x118
#define OMR3BARL			0x130
/* Configuration-space window base and control */
#define CFGBARL				0x154
#define CFGBARH				0x158
#define CFGCTL				0x15c
#define RTDID				0x160	/* bus/dev/fn routing for cfg requests */
#define BRIDGE_CFG_0			0x2000
#define BRIDGE_CFG_4			0x2010
#define BRIDGE_STATUS_0			0x2600	/* bits [31:26] report negotiated lanes */

#define LINK_UP_MASK			0x00000100
#define AXI_EP_CFG_ACCESS		0x10000	/* address bit [16] selects Type 1 cfg */
#define EN_COHERENCY			0xF0000000
#define EN_REG				0x00000001
#define OB_LO_IO			0x00000002
#define XGENE_PCIE_VENDORID		0x10E8
#define XGENE_PCIE_DEVICEID		0xE004
#define SZ_1T				(SZ_1G*1024ULL)
/* Extract the PHY pipe rate (link speed) field, bits [15:14]. */
#define PIPE_PHY_RATE_RD(src)		((0xc000 & (u32)(src)) >> 0xe)
61
/* Per-controller state for one X-Gene PCIe root port. */
struct xgene_pcie_port {
	struct device_node *node;	/* OF node describing this controller */
	struct device *dev;		/* backing platform device */
	struct clk *clk;		/* controller interface clock */
	void __iomem *csr_base;		/* mapped controller CSR window ("csr") */
	void __iomem *cfg_base;		/* mapped config-space access window */
	unsigned long cfg_addr;		/* presumably the physical cfg window base — not used in this view, confirm against setup code */
	bool link_up;			/* link state, set by xgene_pcie_linkup() */
};
71
72static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
73{
74 return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
75}
76
77/* PCIe Configuration Out/In */
78static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
79{
80 writel(val, addr + offset);
81}
82
83static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
84{
85 u32 val32 = readl(addr + (offset & ~0x3));
86
87 switch (offset & 0x3) {
88 case 2:
89 val32 &= ~0xFFFF0000;
90 val32 |= (u32)val << 16;
91 break;
92 case 0:
93 default:
94 val32 &= ~0xFFFF;
95 val32 |= val;
96 break;
97 }
98 writel(val32, addr + (offset & ~0x3));
99}
100
101static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
102{
103 u32 val32 = readl(addr + (offset & ~0x3));
104
105 switch (offset & 0x3) {
106 case 0:
107 val32 &= ~0xFF;
108 val32 |= val;
109 break;
110 case 1:
111 val32 &= ~0xFF00;
112 val32 |= (u32)val << 8;
113 break;
114 case 2:
115 val32 &= ~0xFF0000;
116 val32 |= (u32)val << 16;
117 break;
118 case 3:
119 default:
120 val32 &= ~0xFF000000;
121 val32 |= (u32)val << 24;
122 break;
123 }
124 writel(val32, addr + (offset & ~0x3));
125}
126
127static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
128{
129 *val = readl(addr + offset);
130}
131
132static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
133{
134 *val = readl(addr + (offset & ~0x3));
135
136 switch (offset & 0x3) {
137 case 2:
138 *val >>= 16;
139 break;
140 }
141
142 *val &= 0xFFFF;
143}
144
145static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
146{
147 *val = readl(addr + (offset & ~0x3));
148
149 switch (offset & 0x3) {
150 case 3:
151 *val = *val >> 24;
152 break;
153 case 2:
154 *val = *val >> 16;
155 break;
156 case 1:
157 *val = *val >> 8;
158 break;
159 }
160 *val &= 0xFF;
161}
162
163/*
164 * When the address bit [17:16] is 2'b01, the Configuration access will be
165 * treated as Type 1 and it will be forwarded to external PCIe device.
166 */
167static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
168{
169 struct xgene_pcie_port *port = bus->sysdata;
170
171 if (bus->number >= (bus->primary + 1))
172 return port->cfg_base + AXI_EP_CFG_ACCESS;
173
174 return port->cfg_base;
175}
176
177/*
178 * For Configuration request, RTDID register is used as Bus Number,
179 * Device Number and Function number of the header fields.
180 */
181static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
182{
183 struct xgene_pcie_port *port = bus->sysdata;
184 unsigned int b, d, f;
185 u32 rtdid_val = 0;
186
187 b = bus->number;
188 d = PCI_SLOT(devfn);
189 f = PCI_FUNC(devfn);
190
191 if (!pci_is_root_bus(bus))
192 rtdid_val = (b << 8) | (d << 3) | f;
193
194 writel(rtdid_val, port->csr_base + RTDID);
195 /* read the register back to ensure flush */
196 readl(port->csr_base + RTDID);
197}
198
199/*
200 * X-Gene PCIe port uses BAR0-BAR1 of RC's configuration space as
201 * the translation from PCI bus to native BUS. Entire DDR region
202 * is mapped into PCIe space using these registers, so it can be
203 * reached by DMA from EP devices. The BAR0/1 of bridge should be
204 * hidden during enumeration to avoid the sizing and resource allocation
205 * by PCIe core.
206 */
207static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
208{
209 if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
210 (offset == PCI_BASE_ADDRESS_1)))
211 return true;
212
213 return false;
214}
215
216static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
217 int offset, int len, u32 *val)
218{
219 struct xgene_pcie_port *port = bus->sysdata;
220 void __iomem *addr;
221
222 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
223 return PCIBIOS_DEVICE_NOT_FOUND;
224
225 if (xgene_pcie_hide_rc_bars(bus, offset)) {
226 *val = 0;
227 return PCIBIOS_SUCCESSFUL;
228 }
229
230 xgene_pcie_set_rtdid_reg(bus, devfn);
231 addr = xgene_pcie_get_cfg_base(bus);
232 switch (len) {
233 case 1:
234 xgene_pcie_cfg_in8(addr, offset, val);
235 break;
236 case 2:
237 xgene_pcie_cfg_in16(addr, offset, val);
238 break;
239 default:
240 xgene_pcie_cfg_in32(addr, offset, val);
241 break;
242 }
243
244 return PCIBIOS_SUCCESSFUL;
245}
246
247static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
248 int offset, int len, u32 val)
249{
250 struct xgene_pcie_port *port = bus->sysdata;
251 void __iomem *addr;
252
253 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
254 return PCIBIOS_DEVICE_NOT_FOUND;
255
256 if (xgene_pcie_hide_rc_bars(bus, offset))
257 return PCIBIOS_SUCCESSFUL;
258
259 xgene_pcie_set_rtdid_reg(bus, devfn);
260 addr = xgene_pcie_get_cfg_base(bus);
261 switch (len) {
262 case 1:
263 xgene_pcie_cfg_out8(addr, offset, (u8)val);
264 break;
265 case 2:
266 xgene_pcie_cfg_out16(addr, offset, (u16)val);
267 break;
268 default:
269 xgene_pcie_cfg_out32(addr, offset, val);
270 break;
271 }
272
273 return PCIBIOS_SUCCESSFUL;
274}
275
/* Config-space accessors handed to the PCI core for this host bridge */
static struct pci_ops xgene_pcie_ops = {
	.read = xgene_pcie_read_config,
	.write = xgene_pcie_write_config
};
280
/*
 * Program a 64-bit inbound window mask whose hardware layout splits the
 * value into 16-bit halves spread across three consecutive 32-bit
 * registers.  Each register is read-modify-written so the neighbouring
 * fields sharing it are preserved.  Returns the computed mask.
 */
static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
				u32 flags, u64 size)
{
	/* size-aligned mask with BAR flag bits merged into the low bits */
	u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
	u32 val32 = 0;
	u32 val;

	/* mask[15:0] -> reg[addr] bits [31:16] */
	val32 = readl(csr_base + addr);
	val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
	writel(val, csr_base + addr);

	/* mask[31:16] -> reg[addr+4] bits [15:0] */
	val32 = readl(csr_base + addr + 0x04);
	val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
	writel(val, csr_base + addr + 0x04);

	/* mask[47:32] -> reg[addr+4] bits [31:16] */
	val32 = readl(csr_base + addr + 0x04);
	val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
	writel(val, csr_base + addr + 0x04);

	/* mask[63:48] -> reg[addr+8] bits [15:0] */
	val32 = readl(csr_base + addr + 0x08);
	val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
	writel(val, csr_base + addr + 0x08);

	return mask;
}
306
307static void xgene_pcie_linkup(struct xgene_pcie_port *port,
308 u32 *lanes, u32 *speed)
309{
310 void __iomem *csr_base = port->csr_base;
311 u32 val32;
312
313 port->link_up = false;
314 val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
315 if (val32 & LINK_UP_MASK) {
316 port->link_up = true;
317 *speed = PIPE_PHY_RATE_RD(val32);
318 val32 = readl(csr_base + BRIDGE_STATUS_0);
319 *lanes = val32 >> 26;
320 }
321}
322
323static int xgene_pcie_init_port(struct xgene_pcie_port *port)
324{
325 int rc;
326
327 port->clk = clk_get(port->dev, NULL);
328 if (IS_ERR(port->clk)) {
329 dev_err(port->dev, "clock not available\n");
330 return -ENODEV;
331 }
332
333 rc = clk_prepare_enable(port->clk);
334 if (rc) {
335 dev_err(port->dev, "clock enable failed\n");
336 return rc;
337 }
338
339 return 0;
340}
341
342static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
343 struct platform_device *pdev)
344{
345 struct resource *res;
346
347 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
348 port->csr_base = devm_ioremap_resource(port->dev, res);
349 if (IS_ERR(port->csr_base))
350 return PTR_ERR(port->csr_base);
351
352 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
353 port->cfg_base = devm_ioremap_resource(port->dev, res);
354 if (IS_ERR(port->cfg_base))
355 return PTR_ERR(port->cfg_base);
356 port->cfg_addr = res->start;
357
358 return 0;
359}
360
361static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
362 struct resource *res, u32 offset,
363 u64 cpu_addr, u64 pci_addr)
364{
365 void __iomem *base = port->csr_base + offset;
366 resource_size_t size = resource_size(res);
367 u64 restype = resource_type(res);
368 u64 mask = 0;
369 u32 min_size;
370 u32 flag = EN_REG;
371
372 if (restype == IORESOURCE_MEM) {
373 min_size = SZ_128M;
374 } else {
375 min_size = 128;
376 flag |= OB_LO_IO;
377 }
378
379 if (size >= min_size)
380 mask = ~(size - 1) | flag;
381 else
382 dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
383 (u64)size, min_size);
384
385 writel(lower_32_bits(cpu_addr), base);
386 writel(upper_32_bits(cpu_addr), base + 0x04);
387 writel(lower_32_bits(mask), base + 0x08);
388 writel(upper_32_bits(mask), base + 0x0c);
389 writel(lower_32_bits(pci_addr), base + 0x10);
390 writel(upper_32_bits(pci_addr), base + 0x14);
391}
392
/* Program the translation base for RC config-space accesses */
static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
{
	writel(lower_32_bits(addr), csr_base + CFGBARL);
	writel(upper_32_bits(addr), csr_base + CFGBARH);
	/* the enable bit is written only after both address halves are set */
	writel(EN_REG, csr_base + CFGCTL);
}
399
400static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
401 struct list_head *res,
402 resource_size_t io_base)
403{
404 struct pci_host_bridge_window *window;
405 struct device *dev = port->dev;
406 int ret;
407
408 list_for_each_entry(window, res, list) {
409 struct resource *res = window->res;
410 u64 restype = resource_type(res);
411
412 dev_dbg(port->dev, "%pR\n", res);
413
414 switch (restype) {
415 case IORESOURCE_IO:
416 xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
417 res->start - window->offset);
418 ret = pci_remap_iospace(res, io_base);
419 if (ret < 0)
420 return ret;
421 break;
422 case IORESOURCE_MEM:
423 xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
424 res->start - window->offset);
425 break;
426 case IORESOURCE_BUS:
427 break;
428 default:
429 dev_err(dev, "invalid resource %pR\n", res);
430 return -EINVAL;
431 }
432 }
433 xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
434
435 return 0;
436}
437
438static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
439{
440 writel(lower_32_bits(pim), addr);
441 writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
442 writel(lower_32_bits(size), addr + 0x10);
443 writel(upper_32_bits(size), addr + 0x14);
444}
445
446/*
447 * X-Gene PCIe support maximum 3 inbound memory regions
448 * This function helps to select a region based on size of region
449 */
450static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
451{
452 if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
453 *ib_reg_mask |= (1 << 1);
454 return 1;
455 }
456
457 if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
458 *ib_reg_mask |= (1 << 0);
459 return 0;
460 }
461
462 if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
463 *ib_reg_mask |= (1 << 2);
464 return 2;
465 }
466
467 return -EINVAL;
468}
469
/*
 * Program one inbound translation window for a dma-ranges entry, so EP
 * devices can DMA to the CPU address range described by @range.  On an
 * unusable range the function warns and returns without programming.
 */
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
				    struct of_pci_range *range, u8 *ib_reg_mask)
{
	void __iomem *csr_base = port->csr_base;
	void __iomem *cfg_base = port->cfg_base;
	void *bar_addr;
	void *pim_addr;
	u64 cpu_addr = range->cpu_addr;
	u64 pci_addr = range->pci_addr;
	u64 size = range->size;
	u64 mask = ~(size - 1) | EN_REG;
	u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
	u32 bar_low;
	int region;

	/* pick one of the three inbound regions based on window size */
	region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
	if (region < 0) {
		dev_warn(port->dev, "invalid pcie dma-range config\n");
		return;
	}

	if (range->flags & IORESOURCE_PREFETCH)
		flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
	switch (region) {
	case 0:
		/* region 0 is programmed via the RC's own config-space BAR0/1 */
		xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
		bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
		writel(bar_low, bar_addr);
		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
		pim_addr = csr_base + PIM1_1L;
		break;
	case 1:
		/* region 1: BAR plus a 32-bit mask register in CSR space */
		bar_addr = csr_base + IBAR2;
		writel(bar_low, bar_addr);
		writel(lower_32_bits(mask), csr_base + IR2MSK);
		pim_addr = csr_base + PIM2_1L;
		break;
	case 2:
		/* region 2: 64-bit BAR plus a 64-bit mask in CSR space */
		bar_addr = csr_base + IBAR3L;
		writel(bar_low, bar_addr);
		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
		writel(lower_32_bits(mask), csr_base + IR3MSKL);
		writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
		pim_addr = csr_base + PIM3_1L;
		break;
	}

	/* finally map the PCI-side address onto the selected window */
	xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
}
521
522static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
523 struct device_node *node)
524{
525 const int na = 3, ns = 2;
526 int rlen;
527
528 parser->node = node;
529 parser->pna = of_n_addr_cells(node);
530 parser->np = parser->pna + na + ns;
531
532 parser->range = of_get_property(node, "dma-ranges", &rlen);
533 if (!parser->range)
534 return -ENOENT;
535 parser->end = parser->range + rlen / sizeof(__be32);
536
537 return 0;
538}
539
540static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
541{
542 struct device_node *np = port->node;
543 struct of_pci_range range;
544 struct of_pci_range_parser parser;
545 struct device *dev = port->dev;
546 u8 ib_reg_mask = 0;
547
548 if (pci_dma_range_parser_init(&parser, np)) {
549 dev_err(dev, "missing dma-ranges property\n");
550 return -EINVAL;
551 }
552
553 /* Get the dma-ranges from DT */
554 for_each_of_pci_range(&parser, &range) {
555 u64 end = range.cpu_addr + range.size - 1;
556
557 dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
558 range.flags, range.cpu_addr, end, range.pci_addr);
559 xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
560 }
561 return 0;
562}
563
564/* clear BAR configuration which was done by firmware */
565static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
566{
567 int i;
568
569 for (i = PIM1_1L; i <= CFGCTL; i += 4)
570 writel(0x0, port->csr_base + i);
571}
572
573static int xgene_pcie_setup(struct xgene_pcie_port *port,
574 struct list_head *res,
575 resource_size_t io_base)
576{
577 u32 val, lanes = 0, speed = 0;
578 int ret;
579
580 xgene_pcie_clear_config(port);
581
582 /* setup the vendor and device IDs correctly */
583 val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
584 writel(val, port->csr_base + BRIDGE_CFG_0);
585
586 ret = xgene_pcie_map_ranges(port, res, io_base);
587 if (ret)
588 return ret;
589
590 ret = xgene_pcie_parse_map_dma_ranges(port);
591 if (ret)
592 return ret;
593
594 xgene_pcie_linkup(port, &lanes, &speed);
595 if (!port->link_up)
596 dev_info(port->dev, "(rc) link down\n");
597 else
598 dev_info(port->dev, "(rc) x%d gen-%d link up\n",
599 lanes, speed + 1);
600 return 0;
601}
602
603static int xgene_pcie_probe_bridge(struct platform_device *pdev)
604{
605 struct device_node *dn = pdev->dev.of_node;
606 struct xgene_pcie_port *port;
607 resource_size_t iobase = 0;
608 struct pci_bus *bus;
609 int ret;
610 LIST_HEAD(res);
611
612 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
613 if (!port)
614 return -ENOMEM;
615 port->node = of_node_get(pdev->dev.of_node);
616 port->dev = &pdev->dev;
617
618 ret = xgene_pcie_map_reg(port, pdev);
619 if (ret)
620 return ret;
621
622 ret = xgene_pcie_init_port(port);
623 if (ret)
624 return ret;
625
626 ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
627 if (ret)
628 return ret;
629
630 ret = xgene_pcie_setup(port, &res, iobase);
631 if (ret)
632 return ret;
633
634 bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res);
635 if (!bus)
636 return -ENOMEM;
637
638 platform_set_drvdata(pdev, port);
639 return 0;
640}
641
/* DT compatible strings this driver binds to */
static const struct of_device_id xgene_pcie_match_table[] = {
	{.compatible = "apm,xgene-pcie",},
	{},
};
646
static struct platform_driver xgene_pcie_driver = {
	.driver = {
		.name = "xgene-pcie",
		/* NOTE(review): module_platform_driver() already sets .owner;
		 * this initializer is likely redundant — confirm before removing. */
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(xgene_pcie_match_table),
	},
	.probe = xgene_pcie_probe_bridge,
};
module_platform_driver(xgene_pcie_driver);

MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
MODULE_DESCRIPTION("APM X-Gene PCIe driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52bd3a143563..dfed00aa3ac0 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@ static unsigned long global_io_offset;
73 73
74static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) 74static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
75{ 75{
76 BUG_ON(!sys->private_data);
77
76 return sys->private_data; 78 return sys->private_data;
77} 79}
78 80
@@ -194,30 +196,6 @@ void dw_pcie_msi_init(struct pcie_port *pp)
194 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0); 196 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
195} 197}
196 198
197static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
198{
199 int flag = 1;
200
201 do {
202 pos = find_next_zero_bit(pp->msi_irq_in_use,
203 MAX_MSI_IRQS, pos);
204 /*if you have reached to the end then get out from here.*/
205 if (pos == MAX_MSI_IRQS)
206 return -ENOSPC;
207 /*
208 * Check if this position is at correct offset.nvec is always a
209 * power of two. pos0 must be nvec bit aligned.
210 */
211 if (pos % msgvec)
212 pos += msgvec - (pos % msgvec);
213 else
214 flag = 0;
215 } while (flag);
216
217 *pos0 = pos;
218 return 0;
219}
220
221static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) 199static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
222{ 200{
223 unsigned int res, bit, val; 201 unsigned int res, bit, val;
@@ -236,13 +214,14 @@ static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
236 214
237 for (i = 0; i < nvec; i++) { 215 for (i = 0; i < nvec; i++) {
238 irq_set_msi_desc_off(irq_base, i, NULL); 216 irq_set_msi_desc_off(irq_base, i, NULL);
239 clear_bit(pos + i, pp->msi_irq_in_use);
240 /* Disable corresponding interrupt on MSI controller */ 217 /* Disable corresponding interrupt on MSI controller */
241 if (pp->ops->msi_clear_irq) 218 if (pp->ops->msi_clear_irq)
242 pp->ops->msi_clear_irq(pp, pos + i); 219 pp->ops->msi_clear_irq(pp, pos + i);
243 else 220 else
244 dw_pcie_msi_clear_irq(pp, pos + i); 221 dw_pcie_msi_clear_irq(pp, pos + i);
245 } 222 }
223
224 bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
246} 225}
247 226
248static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) 227static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -258,31 +237,13 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
258 237
259static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) 238static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
260{ 239{
261 int irq, pos0, pos1, i; 240 int irq, pos0, i;
262 struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); 241 struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
263 242
264 if (!pp) { 243 pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
265 BUG(); 244 order_base_2(no_irqs));
266 return -EINVAL; 245 if (pos0 < 0)
267 } 246 goto no_valid_irq;
268
269 pos0 = find_first_zero_bit(pp->msi_irq_in_use,
270 MAX_MSI_IRQS);
271 if (pos0 % no_irqs) {
272 if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
273 goto no_valid_irq;
274 }
275 if (no_irqs > 1) {
276 pos1 = find_next_bit(pp->msi_irq_in_use,
277 MAX_MSI_IRQS, pos0);
278 /* there must be nvec number of consecutive free bits */
279 while ((pos1 - pos0) < no_irqs) {
280 if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
281 goto no_valid_irq;
282 pos1 = find_next_bit(pp->msi_irq_in_use,
283 MAX_MSI_IRQS, pos0);
284 }
285 }
286 247
287 irq = irq_find_mapping(pp->irq_domain, pos0); 248 irq = irq_find_mapping(pp->irq_domain, pos0);
288 if (!irq) 249 if (!irq)
@@ -300,7 +261,6 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
300 clear_irq_range(pp, irq, i, pos0); 261 clear_irq_range(pp, irq, i, pos0);
301 goto no_valid_irq; 262 goto no_valid_irq;
302 } 263 }
303 set_bit(pos0 + i, pp->msi_irq_in_use);
304 /*Enable corresponding interrupt in MSI interrupt controller */ 264 /*Enable corresponding interrupt in MSI interrupt controller */
305 if (pp->ops->msi_set_irq) 265 if (pp->ops->msi_set_irq)
306 pp->ops->msi_set_irq(pp, pos0 + i); 266 pp->ops->msi_set_irq(pp, pos0 + i);
@@ -316,69 +276,28 @@ no_valid_irq:
316 return -ENOSPC; 276 return -ENOSPC;
317} 277}
318 278
319static void clear_irq(unsigned int irq)
320{
321 unsigned int pos, nvec;
322 struct msi_desc *msi;
323 struct pcie_port *pp;
324 struct irq_data *data = irq_get_irq_data(irq);
325
326 /* get the port structure */
327 msi = irq_data_get_msi(data);
328 pp = sys_to_pcie(msi->dev->bus->sysdata);
329 if (!pp) {
330 BUG();
331 return;
332 }
333
334 /* undo what was done in assign_irq */
335 pos = data->hwirq;
336 nvec = 1 << msi->msi_attrib.multiple;
337
338 clear_irq_range(pp, irq, nvec, pos);
339
340 /* all irqs cleared; reset attributes */
341 msi->irq = 0;
342 msi->msi_attrib.multiple = 0;
343}
344
345static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, 279static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
346 struct msi_desc *desc) 280 struct msi_desc *desc)
347{ 281{
348 int irq, pos, msgvec; 282 int irq, pos;
349 u16 msg_ctr;
350 struct msi_msg msg; 283 struct msi_msg msg;
351 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); 284 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
352 285
353 if (!pp) { 286 irq = assign_irq(1, desc, &pos);
354 BUG();
355 return -EINVAL;
356 }
357
358 pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
359 &msg_ctr);
360 msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
361 if (msgvec == 0)
362 msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
363 if (msgvec > 5)
364 msgvec = 0;
365
366 irq = assign_irq((1 << msgvec), desc, &pos);
367 if (irq < 0) 287 if (irq < 0)
368 return irq; 288 return irq;
369 289
370 /* 290 if (pp->ops->get_msi_addr)
371 * write_msi_msg() will update PCI_MSI_FLAGS so there is 291 msg.address_lo = pp->ops->get_msi_addr(pp);
372 * no need to explicitly call pci_write_config_word().
373 */
374 desc->msi_attrib.multiple = msgvec;
375
376 if (pp->ops->get_msi_data)
377 msg.address_lo = pp->ops->get_msi_data(pp);
378 else 292 else
379 msg.address_lo = virt_to_phys((void *)pp->msi_data); 293 msg.address_lo = virt_to_phys((void *)pp->msi_data);
380 msg.address_hi = 0x0; 294 msg.address_hi = 0x0;
381 msg.data = pos; 295
296 if (pp->ops->get_msi_data)
297 msg.data = pp->ops->get_msi_data(pp, pos);
298 else
299 msg.data = pos;
300
382 write_msi_msg(irq, &msg); 301 write_msi_msg(irq, &msg);
383 302
384 return 0; 303 return 0;
@@ -386,7 +305,11 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
386 305
387static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) 306static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
388{ 307{
389 clear_irq(irq); 308 struct irq_data *data = irq_get_irq_data(irq);
309 struct msi_desc *msi = irq_data_get_msi(data);
310 struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
311
312 clear_irq_range(pp, irq, 1, data->hwirq);
390} 313}
391 314
392static struct msi_chip dw_pcie_msi_chip = { 315static struct msi_chip dw_pcie_msi_chip = {
@@ -425,7 +348,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
425 struct resource *cfg_res; 348 struct resource *cfg_res;
426 u32 val, na, ns; 349 u32 val, na, ns;
427 const __be32 *addrp; 350 const __be32 *addrp;
428 int i, index; 351 int i, index, ret;
429 352
430 /* Find the address cell size and the number of cells in order to get 353 /* Find the address cell size and the number of cells in order to get
431 * the untranslated address. 354 * the untranslated address.
@@ -435,16 +358,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
435 358
436 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); 359 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
437 if (cfg_res) { 360 if (cfg_res) {
438 pp->config.cfg0_size = resource_size(cfg_res)/2; 361 pp->cfg0_size = resource_size(cfg_res)/2;
439 pp->config.cfg1_size = resource_size(cfg_res)/2; 362 pp->cfg1_size = resource_size(cfg_res)/2;
440 pp->cfg0_base = cfg_res->start; 363 pp->cfg0_base = cfg_res->start;
441 pp->cfg1_base = cfg_res->start + pp->config.cfg0_size; 364 pp->cfg1_base = cfg_res->start + pp->cfg0_size;
442 365
443 /* Find the untranslated configuration space address */ 366 /* Find the untranslated configuration space address */
444 index = of_property_match_string(np, "reg-names", "config"); 367 index = of_property_match_string(np, "reg-names", "config");
445 addrp = of_get_address(np, index, false, false); 368 addrp = of_get_address(np, index, NULL, NULL);
446 pp->cfg0_mod_base = of_read_number(addrp, ns); 369 pp->cfg0_mod_base = of_read_number(addrp, ns);
447 pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size; 370 pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
448 } else { 371 } else {
449 dev_err(pp->dev, "missing *config* reg space\n"); 372 dev_err(pp->dev, "missing *config* reg space\n");
450 } 373 }
@@ -466,9 +389,9 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
466 pp->io.end = min_t(resource_size_t, 389 pp->io.end = min_t(resource_size_t,
467 IO_SPACE_LIMIT, 390 IO_SPACE_LIMIT,
468 range.pci_addr + range.size 391 range.pci_addr + range.size
469 + global_io_offset); 392 + global_io_offset - 1);
470 pp->config.io_size = resource_size(&pp->io); 393 pp->io_size = resource_size(&pp->io);
471 pp->config.io_bus_addr = range.pci_addr; 394 pp->io_bus_addr = range.pci_addr;
472 pp->io_base = range.cpu_addr; 395 pp->io_base = range.cpu_addr;
473 396
474 /* Find the untranslated IO space address */ 397 /* Find the untranslated IO space address */
@@ -478,8 +401,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
478 if (restype == IORESOURCE_MEM) { 401 if (restype == IORESOURCE_MEM) {
479 of_pci_range_to_resource(&range, np, &pp->mem); 402 of_pci_range_to_resource(&range, np, &pp->mem);
480 pp->mem.name = "MEM"; 403 pp->mem.name = "MEM";
481 pp->config.mem_size = resource_size(&pp->mem); 404 pp->mem_size = resource_size(&pp->mem);
482 pp->config.mem_bus_addr = range.pci_addr; 405 pp->mem_bus_addr = range.pci_addr;
483 406
484 /* Find the untranslated MEM space address */ 407 /* Find the untranslated MEM space address */
485 pp->mem_mod_base = of_read_number(parser.range - 408 pp->mem_mod_base = of_read_number(parser.range -
@@ -487,19 +410,29 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
487 } 410 }
488 if (restype == 0) { 411 if (restype == 0) {
489 of_pci_range_to_resource(&range, np, &pp->cfg); 412 of_pci_range_to_resource(&range, np, &pp->cfg);
490 pp->config.cfg0_size = resource_size(&pp->cfg)/2; 413 pp->cfg0_size = resource_size(&pp->cfg)/2;
491 pp->config.cfg1_size = resource_size(&pp->cfg)/2; 414 pp->cfg1_size = resource_size(&pp->cfg)/2;
492 pp->cfg0_base = pp->cfg.start; 415 pp->cfg0_base = pp->cfg.start;
493 pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; 416 pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
494 417
495 /* Find the untranslated configuration space address */ 418 /* Find the untranslated configuration space address */
496 pp->cfg0_mod_base = of_read_number(parser.range - 419 pp->cfg0_mod_base = of_read_number(parser.range -
497 parser.np + na, ns); 420 parser.np + na, ns);
498 pp->cfg1_mod_base = pp->cfg0_mod_base + 421 pp->cfg1_mod_base = pp->cfg0_mod_base +
499 pp->config.cfg0_size; 422 pp->cfg0_size;
500 } 423 }
501 } 424 }
502 425
426 ret = of_pci_parse_bus_range(np, &pp->busn);
427 if (ret < 0) {
428 pp->busn.name = np->name;
429 pp->busn.start = 0;
430 pp->busn.end = 0xff;
431 pp->busn.flags = IORESOURCE_BUS;
432 dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
433 ret, &pp->busn);
434 }
435
503 if (!pp->dbi_base) { 436 if (!pp->dbi_base) {
504 pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, 437 pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
505 resource_size(&pp->cfg)); 438 resource_size(&pp->cfg));
@@ -511,17 +444,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
511 444
512 pp->mem_base = pp->mem.start; 445 pp->mem_base = pp->mem.start;
513 446
514 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
515 pp->config.cfg0_size);
516 if (!pp->va_cfg0_base) { 447 if (!pp->va_cfg0_base) {
517 dev_err(pp->dev, "error with ioremap in function\n"); 448 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
518 return -ENOMEM; 449 pp->cfg0_size);
450 if (!pp->va_cfg0_base) {
451 dev_err(pp->dev, "error with ioremap in function\n");
452 return -ENOMEM;
453 }
519 } 454 }
520 pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base, 455
521 pp->config.cfg1_size);
522 if (!pp->va_cfg1_base) { 456 if (!pp->va_cfg1_base) {
523 dev_err(pp->dev, "error with ioremap\n"); 457 pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
524 return -ENOMEM; 458 pp->cfg1_size);
459 if (!pp->va_cfg1_base) {
460 dev_err(pp->dev, "error with ioremap\n");
461 return -ENOMEM;
462 }
525 } 463 }
526 464
527 if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { 465 if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
@@ -530,16 +468,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
530 } 468 }
531 469
532 if (IS_ENABLED(CONFIG_PCI_MSI)) { 470 if (IS_ENABLED(CONFIG_PCI_MSI)) {
533 pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, 471 if (!pp->ops->msi_host_init) {
534 MAX_MSI_IRQS, &msi_domain_ops, 472 pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
535 &dw_pcie_msi_chip); 473 MAX_MSI_IRQS, &msi_domain_ops,
536 if (!pp->irq_domain) { 474 &dw_pcie_msi_chip);
537 dev_err(pp->dev, "irq domain init failed\n"); 475 if (!pp->irq_domain) {
538 return -ENXIO; 476 dev_err(pp->dev, "irq domain init failed\n");
539 } 477 return -ENXIO;
478 }
540 479
541 for (i = 0; i < MAX_MSI_IRQS; i++) 480 for (i = 0; i < MAX_MSI_IRQS; i++)
542 irq_create_mapping(pp->irq_domain, i); 481 irq_create_mapping(pp->irq_domain, i);
482 } else {
483 ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
484 if (ret < 0)
485 return ret;
486 }
543 } 487 }
544 488
545 if (pp->ops->host_init) 489 if (pp->ops->host_init)
@@ -558,7 +502,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
558 dw_pci.private_data = (void **)&pp; 502 dw_pci.private_data = (void **)&pp;
559 503
560 pci_common_init_dev(pp->dev, &dw_pci); 504 pci_common_init_dev(pp->dev, &dw_pci);
561 pci_assign_unassigned_resources();
562#ifdef CONFIG_PCI_DOMAINS 505#ifdef CONFIG_PCI_DOMAINS
563 dw_pci.domain++; 506 dw_pci.domain++;
564#endif 507#endif
@@ -573,7 +516,7 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
573 PCIE_ATU_VIEWPORT); 516 PCIE_ATU_VIEWPORT);
574 dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); 517 dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
575 dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); 518 dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
576 dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1, 519 dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
577 PCIE_ATU_LIMIT); 520 PCIE_ATU_LIMIT);
578 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 521 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
579 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 522 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -589,7 +532,7 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
589 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); 532 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
590 dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); 533 dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
591 dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); 534 dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
592 dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1, 535 dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
593 PCIE_ATU_LIMIT); 536 PCIE_ATU_LIMIT);
594 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 537 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
595 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 538 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -604,10 +547,10 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
604 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); 547 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
605 dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); 548 dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
606 dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); 549 dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
607 dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1, 550 dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
608 PCIE_ATU_LIMIT); 551 PCIE_ATU_LIMIT);
609 dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); 552 dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
610 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), 553 dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
611 PCIE_ATU_UPPER_TARGET); 554 PCIE_ATU_UPPER_TARGET);
612 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 555 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
613} 556}
@@ -620,10 +563,10 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
620 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); 563 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
621 dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); 564 dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
622 dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); 565 dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
623 dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1, 566 dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
624 PCIE_ATU_LIMIT); 567 PCIE_ATU_LIMIT);
625 dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); 568 dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
626 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), 569 dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
627 PCIE_ATU_UPPER_TARGET); 570 PCIE_ATU_UPPER_TARGET);
628 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 571 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
629} 572}
@@ -707,11 +650,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
707 struct pcie_port *pp = sys_to_pcie(bus->sysdata); 650 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
708 int ret; 651 int ret;
709 652
710 if (!pp) {
711 BUG();
712 return -EINVAL;
713 }
714
715 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { 653 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
716 *val = 0xffffffff; 654 *val = 0xffffffff;
717 return PCIBIOS_DEVICE_NOT_FOUND; 655 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -736,11 +674,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
736 struct pcie_port *pp = sys_to_pcie(bus->sysdata); 674 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
737 int ret; 675 int ret;
738 676
739 if (!pp) {
740 BUG();
741 return -EINVAL;
742 }
743
744 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) 677 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
745 return PCIBIOS_DEVICE_NOT_FOUND; 678 return PCIBIOS_DEVICE_NOT_FOUND;
746 679
@@ -768,19 +701,17 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
768 701
769 pp = sys_to_pcie(sys); 702 pp = sys_to_pcie(sys);
770 703
771 if (!pp) 704 if (global_io_offset < SZ_1M && pp->io_size > 0) {
772 return 0; 705 sys->io_offset = global_io_offset - pp->io_bus_addr;
773
774 if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
775 sys->io_offset = global_io_offset - pp->config.io_bus_addr;
776 pci_ioremap_io(global_io_offset, pp->io_base); 706 pci_ioremap_io(global_io_offset, pp->io_base);
777 global_io_offset += SZ_64K; 707 global_io_offset += SZ_64K;
778 pci_add_resource_offset(&sys->resources, &pp->io, 708 pci_add_resource_offset(&sys->resources, &pp->io,
779 sys->io_offset); 709 sys->io_offset);
780 } 710 }
781 711
782 sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; 712 sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
783 pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); 713 pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
714 pci_add_resource(&sys->resources, &pp->busn);
784 715
785 return 1; 716 return 1;
786} 717}
@@ -790,14 +721,16 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
790 struct pci_bus *bus; 721 struct pci_bus *bus;
791 struct pcie_port *pp = sys_to_pcie(sys); 722 struct pcie_port *pp = sys_to_pcie(sys);
792 723
793 if (pp) { 724 pp->root_bus_nr = sys->busnr;
794 pp->root_bus_nr = sys->busnr; 725 bus = pci_create_root_bus(pp->dev, sys->busnr,
795 bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops, 726 &dw_pcie_ops, sys, &sys->resources);
796 sys, &sys->resources); 727 if (!bus)
797 } else { 728 return NULL;
798 bus = NULL; 729
799 BUG(); 730 pci_scan_child_bus(bus);
800 } 731
732 if (bus && pp->ops->scan_bus)
733 pp->ops->scan_bus(pp);
801 734
802 return bus; 735 return bus;
803} 736}
@@ -833,7 +766,6 @@ static struct hw_pci dw_pci = {
833 766
834void dw_pcie_setup_rc(struct pcie_port *pp) 767void dw_pcie_setup_rc(struct pcie_port *pp)
835{ 768{
836 struct pcie_port_info *config = &pp->config;
837 u32 val; 769 u32 val;
838 u32 membase; 770 u32 membase;
839 u32 memlimit; 771 u32 memlimit;
@@ -888,7 +820,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
888 820
889 /* setup memory base, memory limit */ 821 /* setup memory base, memory limit */
890 membase = ((u32)pp->mem_base & 0xfff00000) >> 16; 822 membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
891 memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; 823 memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
892 val = memlimit | membase; 824 val = memlimit | membase;
893 dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE); 825 dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
894 826
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index daf81f922cda..c6256751daff 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -14,15 +14,6 @@
14#ifndef _PCIE_DESIGNWARE_H 14#ifndef _PCIE_DESIGNWARE_H
15#define _PCIE_DESIGNWARE_H 15#define _PCIE_DESIGNWARE_H
16 16
17struct pcie_port_info {
18 u32 cfg0_size;
19 u32 cfg1_size;
20 u32 io_size;
21 u32 mem_size;
22 phys_addr_t io_bus_addr;
23 phys_addr_t mem_bus_addr;
24};
25
26/* 17/*
27 * Maximum number of MSI IRQs can be 256 per controller. But keep 18 * Maximum number of MSI IRQs can be 256 per controller. But keep
28 * it 32 as of now. Probably we will never need more than 32. If needed, 19 * it 32 as of now. Probably we will never need more than 32. If needed,
@@ -38,17 +29,23 @@ struct pcie_port {
38 u64 cfg0_base; 29 u64 cfg0_base;
39 u64 cfg0_mod_base; 30 u64 cfg0_mod_base;
40 void __iomem *va_cfg0_base; 31 void __iomem *va_cfg0_base;
32 u32 cfg0_size;
41 u64 cfg1_base; 33 u64 cfg1_base;
42 u64 cfg1_mod_base; 34 u64 cfg1_mod_base;
43 void __iomem *va_cfg1_base; 35 void __iomem *va_cfg1_base;
36 u32 cfg1_size;
44 u64 io_base; 37 u64 io_base;
45 u64 io_mod_base; 38 u64 io_mod_base;
39 phys_addr_t io_bus_addr;
40 u32 io_size;
46 u64 mem_base; 41 u64 mem_base;
47 u64 mem_mod_base; 42 u64 mem_mod_base;
43 phys_addr_t mem_bus_addr;
44 u32 mem_size;
48 struct resource cfg; 45 struct resource cfg;
49 struct resource io; 46 struct resource io;
50 struct resource mem; 47 struct resource mem;
51 struct pcie_port_info config; 48 struct resource busn;
52 int irq; 49 int irq;
53 u32 lanes; 50 u32 lanes;
54 struct pcie_host_ops *ops; 51 struct pcie_host_ops *ops;
@@ -73,7 +70,10 @@ struct pcie_host_ops {
73 void (*host_init)(struct pcie_port *pp); 70 void (*host_init)(struct pcie_port *pp);
74 void (*msi_set_irq)(struct pcie_port *pp, int irq); 71 void (*msi_set_irq)(struct pcie_port *pp, int irq);
75 void (*msi_clear_irq)(struct pcie_port *pp, int irq); 72 void (*msi_clear_irq)(struct pcie_port *pp, int irq);
76 u32 (*get_msi_data)(struct pcie_port *pp); 73 u32 (*get_msi_addr)(struct pcie_port *pp);
74 u32 (*get_msi_data)(struct pcie_port *pp, int pos);
75 void (*scan_bus)(struct pcie_port *pp);
76 int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
77}; 77};
78 78
79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); 79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4884ee5e07d4..61158e03ab5f 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -323,6 +323,7 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
323 323
324 /* Setup PCIe address space mappings for each resource */ 324 /* Setup PCIe address space mappings for each resource */
325 resource_size_t size; 325 resource_size_t size;
326 resource_size_t res_start;
326 u32 mask; 327 u32 mask;
327 328
328 rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); 329 rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -335,8 +336,13 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
335 mask = (roundup_pow_of_two(size) / SZ_128) - 1; 336 mask = (roundup_pow_of_two(size) / SZ_128) - 1;
336 rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); 337 rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
337 338
338 rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win)); 339 if (res->flags & IORESOURCE_IO)
339 rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win)); 340 res_start = pci_pio_to_address(res->start);
341 else
342 res_start = res->start;
343
344 rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPARH(win));
345 rcar_pci_write_reg(pcie, lower_32_bits(res_start), PCIEPARL(win));
340 346
341 /* First resource is for IO */ 347 /* First resource is for IO */
342 mask = PAR_ENABLE; 348 mask = PAR_ENABLE;
@@ -363,9 +369,10 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
363 369
364 rcar_pcie_setup_window(i, pcie); 370 rcar_pcie_setup_window(i, pcie);
365 371
366 if (res->flags & IORESOURCE_IO) 372 if (res->flags & IORESOURCE_IO) {
367 pci_ioremap_io(nr * SZ_64K, res->start); 373 phys_addr_t io_start = pci_pio_to_address(res->start);
368 else 374 pci_ioremap_io(nr * SZ_64K, io_start);
375 } else
369 pci_add_resource(&sys->resources, res); 376 pci_add_resource(&sys->resources, res);
370 } 377 }
371 pci_add_resource(&sys->resources, &pcie->busn); 378 pci_add_resource(&sys->resources, &pcie->busn);
@@ -935,8 +942,10 @@ static int rcar_pcie_probe(struct platform_device *pdev)
935 } 942 }
936 943
937 for_each_of_pci_range(&parser, &range) { 944 for_each_of_pci_range(&parser, &range) {
938 of_pci_range_to_resource(&range, pdev->dev.of_node, 945 err = of_pci_range_to_resource(&range, pdev->dev.of_node,
939 &pcie->res[win++]); 946 &pcie->res[win++]);
947 if (err < 0)
948 return err;
940 949
941 if (win > RCAR_PCI_MAX_RESOURCES) 950 if (win > RCAR_PCI_MAX_RESOURCES)
942 break; 951 break;
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 6dea9e43a75c..85f594e1708f 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -340,7 +340,7 @@ static int __init spear13xx_pcie_probe(struct platform_device *pdev)
340 340
341 pp->dev = dev; 341 pp->dev = dev;
342 342
343 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 343 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
344 pp->dbi_base = devm_ioremap_resource(dev, dbi_base); 344 pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
345 if (IS_ERR(pp->dbi_base)) { 345 if (IS_ERR(pp->dbi_base)) {
346 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); 346 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
new file mode 100644
index 000000000000..ccc496b33a97
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -0,0 +1,970 @@
1/*
2 * PCIe host controller driver for Xilinx AXI PCIe Bridge
3 *
4 * Copyright (c) 2012 - 2014 Xilinx, Inc.
5 *
6 * Based on the Tegra PCIe driver
7 *
8 * Bits taken from Synopsys Designware Host controller driver and
9 * ARM PCI Host generic driver.
10 *
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/irqdomain.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/msi.h>
23#include <linux/of_address.h>
24#include <linux/of_pci.h>
25#include <linux/of_platform.h>
26#include <linux/of_irq.h>
27#include <linux/pci.h>
28#include <linux/platform_device.h>
29
30/* Register definitions */
31#define XILINX_PCIE_REG_BIR 0x00000130
32#define XILINX_PCIE_REG_IDR 0x00000138
33#define XILINX_PCIE_REG_IMR 0x0000013c
34#define XILINX_PCIE_REG_PSCR 0x00000144
35#define XILINX_PCIE_REG_RPSC 0x00000148
36#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
37#define XILINX_PCIE_REG_MSIBASE2 0x00000150
38#define XILINX_PCIE_REG_RPEFR 0x00000154
39#define XILINX_PCIE_REG_RPIFR1 0x00000158
40#define XILINX_PCIE_REG_RPIFR2 0x0000015c
41
42/* Interrupt registers definitions */
43#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
44#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
45#define XILINX_PCIE_INTR_STR_ERR BIT(2)
46#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
47#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
48#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
49#define XILINX_PCIE_INTR_NONFATAL BIT(10)
50#define XILINX_PCIE_INTR_FATAL BIT(11)
51#define XILINX_PCIE_INTR_INTX BIT(16)
52#define XILINX_PCIE_INTR_MSI BIT(17)
53#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
54#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
55#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
56#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
57#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
58#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
59#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
60#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
61#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
62#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
63#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
64
65/* Root Port Error FIFO Read Register definitions */
66#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
67#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
68#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
69
70/* Root Port Interrupt FIFO Read Register 1 definitions */
71#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
72#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
73#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
74#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
75#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
76
77/* Bridge Info Register definitions */
78#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
79#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
80
81/* Root Port Interrupt FIFO Read Register 2 definitions */
82#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
83
84/* Root Port Status/control Register definitions */
85#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
86
87/* Phy Status/Control Register definitions */
88#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
89
90/* ECAM definitions */
91#define ECAM_BUS_NUM_SHIFT 20
92#define ECAM_DEV_NUM_SHIFT 12
93
94/* Number of MSI IRQs */
95#define XILINX_NUM_MSI_IRQS 128
96
97/* Number of Memory Resources */
98#define XILINX_MAX_NUM_RESOURCES 3
99
/**
 * struct xilinx_pcie_port - PCIe port information
 * @reg_base: IO Mapped Register Base (also the base of the ECAM window,
 *            see xilinx_pcie_config_base())
 * @irq: Interrupt number of the bridge's (shared) IRQ line
 * @msi_pages: Page allocated as the MSI target address the bridge matches
 * @root_busno: Root Bus number assigned at scan time
 * @dev: Device pointer
 * @irq_domain: IRQ domain pointer (INTx domain, replaced by the MSI
 *              domain when CONFIG_PCI_MSI is enabled)
 * @bus_range: Bus range (from DT, or derived from the Bridge Info register)
 * @resources: Bus Resources parsed from the DT "ranges" property
 */
struct xilinx_pcie_port {
	void __iomem *reg_base;
	u32 irq;
	unsigned long msi_pages;
	u8 root_busno;
	struct device *dev;
	struct irq_domain *irq_domain;
	struct resource bus_range;
	struct list_head resources;
};
121
122static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
123
/* Retrieve the driver's per-port state stashed in the ARM pci_sys_data. */
static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
128
/* Read a 32-bit bridge register at byte offset @reg. */
static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
	return readl(port->reg_base + reg);
}
133
/* Write a 32-bit bridge register at byte offset @reg. */
static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->reg_base + reg);
}
138
139static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
140{
141 return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
142 XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
143}
144
145/**
146 * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
147 * @port: PCIe port information
148 */
149static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
150{
151 u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
152
153 if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
154 dev_dbg(port->dev, "Requester ID %d\n",
155 val & XILINX_PCIE_RPEFR_REQ_ID);
156 pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
157 XILINX_PCIE_REG_RPEFR);
158 }
159}
160
161/**
162 * xilinx_pcie_valid_device - Check if a valid device is present on bus
163 * @bus: PCI Bus structure
164 * @devfn: device/function
165 *
166 * Return: 'true' on success and 'false' if invalid device is found
167 */
168static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
169{
170 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
171
172 /* Check if link is up when trying to access downstream ports */
173 if (bus->number != port->root_busno)
174 if (!xilinx_pcie_link_is_up(port))
175 return false;
176
177 /* Only one device down on each root port */
178 if (bus->number == port->root_busno && devfn > 0)
179 return false;
180
181 /*
182 * Do not read more than one device on the bus directly attached
183 * to RC.
184 */
185 if (bus->primary == port->root_busno && devfn > 0)
186 return false;
187
188 return true;
189}
190
191/**
192 * xilinx_pcie_config_base - Get configuration base
193 * @bus: PCI Bus structure
194 * @devfn: Device/function
195 * @where: Offset from base
196 *
197 * Return: Base address of the configuration space needed to be
198 * accessed.
199 */
200static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
201 unsigned int devfn, int where)
202{
203 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
204 int relbus;
205
206 relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
207 (devfn << ECAM_DEV_NUM_SHIFT);
208
209 return port->reg_base + relbus + where;
210}
211
212/**
213 * xilinx_pcie_read_config - Read configuration space
214 * @bus: PCI Bus structure
215 * @devfn: Device/function
216 * @where: Offset from base
217 * @size: Byte/word/dword
218 * @val: Value to be read
219 *
220 * Return: PCIBIOS_SUCCESSFUL on success
221 * PCIBIOS_DEVICE_NOT_FOUND on failure
222 */
223static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
224 int where, int size, u32 *val)
225{
226 void __iomem *addr;
227
228 if (!xilinx_pcie_valid_device(bus, devfn)) {
229 *val = 0xFFFFFFFF;
230 return PCIBIOS_DEVICE_NOT_FOUND;
231 }
232
233 addr = xilinx_pcie_config_base(bus, devfn, where);
234
235 switch (size) {
236 case 1:
237 *val = readb(addr);
238 break;
239 case 2:
240 *val = readw(addr);
241 break;
242 default:
243 *val = readl(addr);
244 break;
245 }
246
247 return PCIBIOS_SUCCESSFUL;
248}
249
250/**
251 * xilinx_pcie_write_config - Write configuration space
252 * @bus: PCI Bus structure
253 * @devfn: Device/function
254 * @where: Offset from base
255 * @size: Byte/word/dword
256 * @val: Value to be written to device
257 *
258 * Return: PCIBIOS_SUCCESSFUL on success
259 * PCIBIOS_DEVICE_NOT_FOUND on failure
260 */
261static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
262 int where, int size, u32 val)
263{
264 void __iomem *addr;
265
266 if (!xilinx_pcie_valid_device(bus, devfn))
267 return PCIBIOS_DEVICE_NOT_FOUND;
268
269 addr = xilinx_pcie_config_base(bus, devfn, where);
270
271 switch (size) {
272 case 1:
273 writeb(val, addr);
274 break;
275 case 2:
276 writew(val, addr);
277 break;
278 default:
279 writel(val, addr);
280 break;
281 }
282
283 return PCIBIOS_SUCCESSFUL;
284}
285
286/* PCIe operations */
287static struct pci_ops xilinx_pcie_ops = {
288 .read = xilinx_pcie_read_config,
289 .write = xilinx_pcie_write_config,
290};
291
292/* MSI functions */
293
294/**
295 * xilinx_pcie_destroy_msi - Free MSI number
296 * @irq: IRQ to be freed
297 */
298static void xilinx_pcie_destroy_msi(unsigned int irq)
299{
300 struct irq_desc *desc;
301 struct msi_desc *msi;
302 struct xilinx_pcie_port *port;
303
304 desc = irq_to_desc(irq);
305 msi = irq_desc_get_msi_desc(desc);
306 port = sys_to_pcie(msi->dev->bus->sysdata);
307
308 if (!test_bit(irq, msi_irq_in_use))
309 dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
310 else
311 clear_bit(irq, msi_irq_in_use);
312}
313
314/**
315 * xilinx_pcie_assign_msi - Allocate MSI number
316 * @port: PCIe port structure
317 *
318 * Return: A valid IRQ on success and error value on failure.
319 */
320static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
321{
322 int pos;
323
324 pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
325 if (pos < XILINX_NUM_MSI_IRQS)
326 set_bit(pos, msi_irq_in_use);
327 else
328 return -ENOSPC;
329
330 return pos;
331}
332
333/**
334 * xilinx_msi_teardown_irq - Destroy the MSI
335 * @chip: MSI Chip descriptor
336 * @irq: MSI IRQ to destroy
337 */
338static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
339{
340 xilinx_pcie_destroy_msi(irq);
341}
342
343/**
344 * xilinx_pcie_msi_setup_irq - Setup MSI request
345 * @chip: MSI chip pointer
346 * @pdev: PCIe device pointer
347 * @desc: MSI descriptor pointer
348 *
349 * Return: '0' on success and error value on failure
350 */
351static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
352 struct pci_dev *pdev,
353 struct msi_desc *desc)
354{
355 struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
356 unsigned int irq;
357 int hwirq;
358 struct msi_msg msg;
359 phys_addr_t msg_addr;
360
361 hwirq = xilinx_pcie_assign_msi(port);
362 if (hwirq < 0)
363 return hwirq;
364
365 irq = irq_create_mapping(port->irq_domain, hwirq);
366 if (!irq)
367 return -EINVAL;
368
369 irq_set_msi_desc(irq, desc);
370
371 msg_addr = virt_to_phys((void *)port->msi_pages);
372
373 msg.address_hi = 0;
374 msg.address_lo = msg_addr;
375 msg.data = irq;
376
377 write_msi_msg(irq, &msg);
378
379 return 0;
380}
381
382/* MSI Chip Descriptor */
383static struct msi_chip xilinx_pcie_msi_chip = {
384 .setup_irq = xilinx_pcie_msi_setup_irq,
385 .teardown_irq = xilinx_msi_teardown_irq,
386};
387
388/* HW Interrupt Chip Descriptor */
389static struct irq_chip xilinx_msi_irq_chip = {
390 .name = "Xilinx PCIe MSI",
391 .irq_enable = unmask_msi_irq,
392 .irq_disable = mask_msi_irq,
393 .irq_mask = mask_msi_irq,
394 .irq_unmask = unmask_msi_irq,
395};
396
/**
 * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	/* IRQF_VALID allows the descriptor to be requested (ARM-specific) */
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
414
415/* IRQ Domain operations */
416static const struct irq_domain_ops msi_domain_ops = {
417 .map = xilinx_pcie_msi_map,
418};
419
420/**
421 * xilinx_pcie_enable_msi - Enable MSI support
422 * @port: PCIe port information
423 */
424static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
425{
426 phys_addr_t msg_addr;
427
428 port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
429 msg_addr = virt_to_phys((void *)port->msi_pages);
430 pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
431 pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
432}
433
434/**
435 * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
436 * @bus: PCIe bus
437 */
438static void xilinx_pcie_add_bus(struct pci_bus *bus)
439{
440 if (IS_ENABLED(CONFIG_PCI_MSI)) {
441 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
442
443 xilinx_pcie_msi_chip.dev = port->dev;
444 bus->msi = &xilinx_pcie_msi_chip;
445 }
446}
447
448/* INTx Functions */
449
/**
 * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	/* dummy_irq_chip: INTx is acked centrally in the bridge ISR */
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
467
468/* INTx IRQ Domain operations */
469static const struct irq_domain_ops intx_domain_ops = {
470 .map = xilinx_pcie_intx_map,
471};
472
473/* PCIe HW Functions */
474
/**
 * xilinx_pcie_intr_handler - Interrupt Service Handler
 * @irq: IRQ number
 * @data: PCIe port information
 *
 * Decodes every asserted-and-unmasked cause bit, demultiplexes INTx and
 * MSI events from the root-port interrupt FIFO, and acknowledges the
 * handled causes in IDR at the end.
 *
 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
 */
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
	u32 val, mask, status, msi_data;

	/* Read interrupt decode and mask registers */
	val = pcie_read(port, XILINX_PCIE_REG_IDR);
	mask = pcie_read(port, XILINX_PCIE_REG_IMR);

	/* Only causes we actually unmasked count; the line is shared */
	status = val & mask;
	if (!status)
		return IRQ_NONE;

	if (status & XILINX_PCIE_INTR_LINK_DOWN)
		dev_warn(port->dev, "Link Down\n");

	if (status & XILINX_PCIE_INTR_ECRC_ERR)
		dev_warn(port->dev, "ECRC failed\n");

	if (status & XILINX_PCIE_INTR_STR_ERR)
		dev_warn(port->dev, "Streaming error\n");

	if (status & XILINX_PCIE_INTR_HOT_RESET)
		dev_info(port->dev, "Hot reset\n");

	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
		dev_warn(port->dev, "ECAM access timeout\n");

	/* AER-style error messages also latch the requester in the FIFO */
	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
		dev_warn(port->dev, "Correctable error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_NONFATAL) {
		dev_warn(port->dev, "Non fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_FATAL) {
		dev_warn(port->dev, "Fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_INTX) {
		/* INTx interrupt received */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		/* Check whether interrupt valid */
		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		/* Clear interrupt FIFO register 1 */
		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
			   XILINX_PCIE_REG_RPIFR1);

		/*
		 * Handle INTx Interrupt: bits 28:27 encode INTA..INTD as
		 * 0..3, so +1 yields hwirq 1..4 in the INTx domain.
		 * NOTE(review): with CONFIG_PCI_MSI, port->irq_domain was
		 * replaced by the MSI domain at init time — confirm INTx
		 * lookup still resolves in that configuration.
		 */
		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
			XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
	}

	if (status & XILINX_PCIE_INTR_MSI) {
		/* MSI Interrupt */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
			msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
				   XILINX_PCIE_RPIFR2_MSG_DATA;

			/* Clear interrupt FIFO register 1 */
			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
				   XILINX_PCIE_REG_RPIFR1);

			if (IS_ENABLED(CONFIG_PCI_MSI)) {
				/*
				 * Handle MSI Interrupt: the message data was
				 * set to the virq in msi_setup_irq, so it is
				 * dispatched directly.
				 */
				generic_handle_irq(msi_data);
			}
		}
	}

	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
		dev_warn(port->dev, "Slave unsupported request\n");

	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
		dev_warn(port->dev, "Slave unexpected completion\n");

	if (status & XILINX_PCIE_INTR_SLV_COMPL)
		dev_warn(port->dev, "Slave completion timeout\n");

	if (status & XILINX_PCIE_INTR_SLV_ERRP)
		dev_warn(port->dev, "Slave Error Poison\n");

	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
		dev_warn(port->dev, "Slave Completer Abort\n");

	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
		dev_warn(port->dev, "Slave Illegal Burst\n");

	if (status & XILINX_PCIE_INTR_MST_DECERR)
		dev_warn(port->dev, "Master decode error\n");

	if (status & XILINX_PCIE_INTR_MST_SLVERR)
		dev_warn(port->dev, "Master slave error\n");

	if (status & XILINX_PCIE_INTR_MST_ERRP)
		dev_warn(port->dev, "Master error poison\n");

	/* Clear the Interrupt Decode register */
	pcie_write(port, status, XILINX_PCIE_REG_IDR);

	return IRQ_HANDLED;
}
601
602/**
603 * xilinx_pcie_free_irq_domain - Free IRQ domain
604 * @port: PCIe port information
605 */
606static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
607{
608 int i;
609 u32 irq, num_irqs;
610
611 /* Free IRQ Domain */
612 if (IS_ENABLED(CONFIG_PCI_MSI)) {
613
614 free_pages(port->msi_pages, 0);
615
616 num_irqs = XILINX_NUM_MSI_IRQS;
617 } else {
618 /* INTx */
619 num_irqs = 4;
620 }
621
622 for (i = 0; i < num_irqs; i++) {
623 irq = irq_find_mapping(port->irq_domain, i);
624 if (irq > 0)
625 irq_dispose_mapping(irq);
626 }
627
628 irq_domain_remove(port->irq_domain);
629}
630
631/**
632 * xilinx_pcie_init_irq_domain - Initialize IRQ domain
633 * @port: PCIe port information
634 *
635 * Return: '0' on success and error value on failure
636 */
637static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
638{
639 struct device *dev = port->dev;
640 struct device_node *node = dev->of_node;
641 struct device_node *pcie_intc_node;
642
643 /* Setup INTx */
644 pcie_intc_node = of_get_next_child(node, NULL);
645 if (!pcie_intc_node) {
646 dev_err(dev, "No PCIe Intc node found\n");
647 return PTR_ERR(pcie_intc_node);
648 }
649
650 port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
651 &intx_domain_ops,
652 port);
653 if (!port->irq_domain) {
654 dev_err(dev, "Failed to get a INTx IRQ domain\n");
655 return PTR_ERR(port->irq_domain);
656 }
657
658 /* Setup MSI */
659 if (IS_ENABLED(CONFIG_PCI_MSI)) {
660 port->irq_domain = irq_domain_add_linear(node,
661 XILINX_NUM_MSI_IRQS,
662 &msi_domain_ops,
663 &xilinx_pcie_msi_chip);
664 if (!port->irq_domain) {
665 dev_err(dev, "Failed to get a MSI IRQ domain\n");
666 return PTR_ERR(port->irq_domain);
667 }
668
669 xilinx_pcie_enable_msi(port);
670 }
671
672 return 0;
673}
674
675/**
676 * xilinx_pcie_init_port - Initialize hardware
677 * @port: PCIe port information
678 */
679static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
680{
681 if (xilinx_pcie_link_is_up(port))
682 dev_info(port->dev, "PCIe Link is UP\n");
683 else
684 dev_info(port->dev, "PCIe Link is DOWN\n");
685
686 /* Disable all interrupts */
687 pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
688 XILINX_PCIE_REG_IMR);
689
690 /* Clear pending interrupts */
691 pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
692 XILINX_PCIE_IMR_ALL_MASK,
693 XILINX_PCIE_REG_IDR);
694
695 /* Enable all interrupts */
696 pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
697
698 /* Enable the Bridge enable bit */
699 pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
700 XILINX_PCIE_REG_RPSC_BEN,
701 XILINX_PCIE_REG_RPSC);
702}
703
/**
 * xilinx_pcie_setup - Setup memory resources
 * @nr: Bus number
 * @sys: Per controller structure
 *
 * Hands the resources collected in xilinx_pcie_parse_and_add_res() over
 * to the ARM PCI core.
 *
 * Return: '1' on success and error value on failure
 */
static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct xilinx_pcie_port *port = sys_to_pcie(sys);

	/* Transfer ownership of the parsed resource list to the core */
	list_splice_init(&port->resources, &sys->resources);

	/* Non-zero tells pci_common_init_dev() this controller exists */
	return 1;
}
719
/**
 * xilinx_pcie_scan_bus - Scan PCIe bus for devices
 * @nr: Bus number
 * @sys: Per controller structure
 *
 * Return: Valid Bus pointer on success and NULL on failure
 */
static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct xilinx_pcie_port *port = sys_to_pcie(sys);
	struct pci_bus *bus;

	/* Remember the root bus number for xilinx_pcie_valid_device() */
	port->root_busno = sys->busnr;
	bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
				sys, &sys->resources);

	return bus;
}
738
739/**
740 * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
741 * @port: PCIe port information
742 *
743 * Return: '0' on success and error value on failure
744 */
745static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
746{
747 struct device *dev = port->dev;
748 struct device_node *node = dev->of_node;
749 struct resource *mem;
750 resource_size_t offset;
751 struct of_pci_range_parser parser;
752 struct of_pci_range range;
753 struct pci_host_bridge_window *win;
754 int err = 0, mem_resno = 0;
755
756 /* Get the ranges */
757 if (of_pci_range_parser_init(&parser, node)) {
758 dev_err(dev, "missing \"ranges\" property\n");
759 return -EINVAL;
760 }
761
762 /* Parse the ranges and add the resources found to the list */
763 for_each_of_pci_range(&parser, &range) {
764
765 if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
766 dev_err(dev, "Maximum memory resources exceeded\n");
767 return -EINVAL;
768 }
769
770 mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
771 if (!mem) {
772 err = -ENOMEM;
773 goto free_resources;
774 }
775
776 of_pci_range_to_resource(&range, node, mem);
777
778 switch (mem->flags & IORESOURCE_TYPE_BITS) {
779 case IORESOURCE_MEM:
780 offset = range.cpu_addr - range.pci_addr;
781 mem_resno++;
782 break;
783 default:
784 err = -EINVAL;
785 break;
786 }
787
788 if (err < 0) {
789 dev_warn(dev, "Invalid resource found %pR\n", mem);
790 continue;
791 }
792
793 err = request_resource(&iomem_resource, mem);
794 if (err)
795 goto free_resources;
796
797 pci_add_resource_offset(&port->resources, mem, offset);
798 }
799
800 /* Get the bus range */
801 if (of_pci_parse_bus_range(node, &port->bus_range)) {
802 u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
803 u8 last;
804
805 last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
806 XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
807
808 port->bus_range = (struct resource) {
809 .name = node->name,
810 .start = 0,
811 .end = last,
812 .flags = IORESOURCE_BUS,
813 };
814 }
815
816 /* Register bus resource */
817 pci_add_resource(&port->resources, &port->bus_range);
818
819 return 0;
820
821free_resources:
822 release_child_resources(&iomem_resource);
823 list_for_each_entry(win, &port->resources, list)
824 devm_kfree(dev, win->res);
825 pci_free_resource_list(&port->resources);
826
827 return err;
828}
829
830/**
831 * xilinx_pcie_parse_dt - Parse Device tree
832 * @port: PCIe port information
833 *
834 * Return: '0' on success and error value on failure
835 */
836static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
837{
838 struct device *dev = port->dev;
839 struct device_node *node = dev->of_node;
840 struct resource regs;
841 const char *type;
842 int err;
843
844 type = of_get_property(node, "device_type", NULL);
845 if (!type || strcmp(type, "pci")) {
846 dev_err(dev, "invalid \"device_type\" %s\n", type);
847 return -EINVAL;
848 }
849
850 err = of_address_to_resource(node, 0, &regs);
851 if (err) {
852 dev_err(dev, "missing \"reg\" property\n");
853 return err;
854 }
855
856 port->reg_base = devm_ioremap_resource(dev, &regs);
857 if (IS_ERR(port->reg_base))
858 return PTR_ERR(port->reg_base);
859
860 port->irq = irq_of_parse_and_map(node, 0);
861 err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
862 IRQF_SHARED, "xilinx-pcie", port);
863 if (err) {
864 dev_err(dev, "unable to request irq %d\n", port->irq);
865 return err;
866 }
867
868 return 0;
869}
870
871/**
872 * xilinx_pcie_probe - Probe function
873 * @pdev: Platform device pointer
874 *
875 * Return: '0' on success and error value on failure
876 */
877static int xilinx_pcie_probe(struct platform_device *pdev)
878{
879 struct xilinx_pcie_port *port;
880 struct hw_pci hw;
881 struct device *dev = &pdev->dev;
882 int err;
883
884 if (!dev->of_node)
885 return -ENODEV;
886
887 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
888 if (!port)
889 return -ENOMEM;
890
891 port->dev = dev;
892
893 err = xilinx_pcie_parse_dt(port);
894 if (err) {
895 dev_err(dev, "Parsing DT failed\n");
896 return err;
897 }
898
899 xilinx_pcie_init_port(port);
900
901 err = xilinx_pcie_init_irq_domain(port);
902 if (err) {
903 dev_err(dev, "Failed creating IRQ Domain\n");
904 return err;
905 }
906
907 /*
908 * Parse PCI ranges, configuration bus range and
909 * request their resources
910 */
911 INIT_LIST_HEAD(&port->resources);
912 err = xilinx_pcie_parse_and_add_res(port);
913 if (err) {
914 dev_err(dev, "Failed adding resources\n");
915 return err;
916 }
917
918 platform_set_drvdata(pdev, port);
919
920 /* Register the device */
921 memset(&hw, 0, sizeof(hw));
922 hw = (struct hw_pci) {
923 .nr_controllers = 1,
924 .private_data = (void **)&port,
925 .setup = xilinx_pcie_setup,
926 .map_irq = of_irq_parse_and_map_pci,
927 .add_bus = xilinx_pcie_add_bus,
928 .scan = xilinx_pcie_scan_bus,
929 .ops = &xilinx_pcie_ops,
930 };
931 pci_common_init_dev(dev, &hw);
932
933 return 0;
934}
935
/**
 * xilinx_pcie_remove - Remove function
 * @pdev: Platform device pointer
 *
 * Tears down the IRQ domain; everything else is devm-managed.
 *
 * Return: '0' always
 */
static int xilinx_pcie_remove(struct platform_device *pdev)
{
	struct xilinx_pcie_port *port = platform_get_drvdata(pdev);

	xilinx_pcie_free_irq_domain(port);

	return 0;
}
950
/* Device-tree match table for the platform driver */
static struct of_device_id xilinx_pcie_of_match[] = {
	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
	{}
};
955
/* Platform driver; bind/unbind via sysfs is suppressed for a host bridge */
static struct platform_driver xilinx_pcie_driver = {
	.driver = {
		.name = "xilinx-pcie",
		.owner = THIS_MODULE,
		.of_match_table = xilinx_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pcie_probe,
	.remove = xilinx_pcie_remove,
};
module_platform_driver(xilinx_pcie_driver);
967
968MODULE_AUTHOR("Xilinx Inc");
969MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
970MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3e6532b945c1..4a9aa08b08f1 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
24 24
25obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o 25obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
26 26
27pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o 27pci_hotplug-objs := pci_hotplug_core.o
28 28
29ifdef CONFIG_HOTPLUG_PCI_CPCI 29ifdef CONFIG_HOTPLUG_PCI_CPCI
30pci_hotplug-objs += cpci_hotplug_core.o \ 30pci_hotplug-objs += cpci_hotplug_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index a94d850ae228..876ccc620440 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -46,215 +46,6 @@
46 46
47static bool debug_acpi; 47static bool debug_acpi;
48 48
49static acpi_status
50decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
51{
52 int i;
53 union acpi_object *fields = record->package.elements;
54 u32 revision = fields[1].integer.value;
55
56 switch (revision) {
57 case 1:
58 if (record->package.count != 6)
59 return AE_ERROR;
60 for (i = 2; i < 6; i++)
61 if (fields[i].type != ACPI_TYPE_INTEGER)
62 return AE_ERROR;
63 hpx->t0 = &hpx->type0_data;
64 hpx->t0->revision = revision;
65 hpx->t0->cache_line_size = fields[2].integer.value;
66 hpx->t0->latency_timer = fields[3].integer.value;
67 hpx->t0->enable_serr = fields[4].integer.value;
68 hpx->t0->enable_perr = fields[5].integer.value;
69 break;
70 default:
71 printk(KERN_WARNING
72 "%s: Type 0 Revision %d record not supported\n",
73 __func__, revision);
74 return AE_ERROR;
75 }
76 return AE_OK;
77}
78
79static acpi_status
80decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
81{
82 int i;
83 union acpi_object *fields = record->package.elements;
84 u32 revision = fields[1].integer.value;
85
86 switch (revision) {
87 case 1:
88 if (record->package.count != 5)
89 return AE_ERROR;
90 for (i = 2; i < 5; i++)
91 if (fields[i].type != ACPI_TYPE_INTEGER)
92 return AE_ERROR;
93 hpx->t1 = &hpx->type1_data;
94 hpx->t1->revision = revision;
95 hpx->t1->max_mem_read = fields[2].integer.value;
96 hpx->t1->avg_max_split = fields[3].integer.value;
97 hpx->t1->tot_max_split = fields[4].integer.value;
98 break;
99 default:
100 printk(KERN_WARNING
101 "%s: Type 1 Revision %d record not supported\n",
102 __func__, revision);
103 return AE_ERROR;
104 }
105 return AE_OK;
106}
107
108static acpi_status
109decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
110{
111 int i;
112 union acpi_object *fields = record->package.elements;
113 u32 revision = fields[1].integer.value;
114
115 switch (revision) {
116 case 1:
117 if (record->package.count != 18)
118 return AE_ERROR;
119 for (i = 2; i < 18; i++)
120 if (fields[i].type != ACPI_TYPE_INTEGER)
121 return AE_ERROR;
122 hpx->t2 = &hpx->type2_data;
123 hpx->t2->revision = revision;
124 hpx->t2->unc_err_mask_and = fields[2].integer.value;
125 hpx->t2->unc_err_mask_or = fields[3].integer.value;
126 hpx->t2->unc_err_sever_and = fields[4].integer.value;
127 hpx->t2->unc_err_sever_or = fields[5].integer.value;
128 hpx->t2->cor_err_mask_and = fields[6].integer.value;
129 hpx->t2->cor_err_mask_or = fields[7].integer.value;
130 hpx->t2->adv_err_cap_and = fields[8].integer.value;
131 hpx->t2->adv_err_cap_or = fields[9].integer.value;
132 hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
133 hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
134 hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
135 hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
136 hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
137 hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
138 hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
139 hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
140 break;
141 default:
142 printk(KERN_WARNING
143 "%s: Type 2 Revision %d record not supported\n",
144 __func__, revision);
145 return AE_ERROR;
146 }
147 return AE_OK;
148}
149
150static acpi_status
151acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
152{
153 acpi_status status;
154 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
155 union acpi_object *package, *record, *fields;
156 u32 type;
157 int i;
158
159 /* Clear the return buffer with zeros */
160 memset(hpx, 0, sizeof(struct hotplug_params));
161
162 status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
163 if (ACPI_FAILURE(status))
164 return status;
165
166 package = (union acpi_object *)buffer.pointer;
167 if (package->type != ACPI_TYPE_PACKAGE) {
168 status = AE_ERROR;
169 goto exit;
170 }
171
172 for (i = 0; i < package->package.count; i++) {
173 record = &package->package.elements[i];
174 if (record->type != ACPI_TYPE_PACKAGE) {
175 status = AE_ERROR;
176 goto exit;
177 }
178
179 fields = record->package.elements;
180 if (fields[0].type != ACPI_TYPE_INTEGER ||
181 fields[1].type != ACPI_TYPE_INTEGER) {
182 status = AE_ERROR;
183 goto exit;
184 }
185
186 type = fields[0].integer.value;
187 switch (type) {
188 case 0:
189 status = decode_type0_hpx_record(record, hpx);
190 if (ACPI_FAILURE(status))
191 goto exit;
192 break;
193 case 1:
194 status = decode_type1_hpx_record(record, hpx);
195 if (ACPI_FAILURE(status))
196 goto exit;
197 break;
198 case 2:
199 status = decode_type2_hpx_record(record, hpx);
200 if (ACPI_FAILURE(status))
201 goto exit;
202 break;
203 default:
204 printk(KERN_ERR "%s: Type %d record not supported\n",
205 __func__, type);
206 status = AE_ERROR;
207 goto exit;
208 }
209 }
210 exit:
211 kfree(buffer.pointer);
212 return status;
213}
214
215static acpi_status
216acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
217{
218 acpi_status status;
219 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
220 union acpi_object *package, *fields;
221 int i;
222
223 memset(hpp, 0, sizeof(struct hotplug_params));
224
225 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
226 if (ACPI_FAILURE(status))
227 return status;
228
229 package = (union acpi_object *) buffer.pointer;
230 if (package->type != ACPI_TYPE_PACKAGE ||
231 package->package.count != 4) {
232 status = AE_ERROR;
233 goto exit;
234 }
235
236 fields = package->package.elements;
237 for (i = 0; i < 4; i++) {
238 if (fields[i].type != ACPI_TYPE_INTEGER) {
239 status = AE_ERROR;
240 goto exit;
241 }
242 }
243
244 hpp->t0 = &hpp->type0_data;
245 hpp->t0->revision = 1;
246 hpp->t0->cache_line_size = fields[0].integer.value;
247 hpp->t0->latency_timer = fields[1].integer.value;
248 hpp->t0->enable_serr = fields[2].integer.value;
249 hpp->t0->enable_perr = fields[3].integer.value;
250
251exit:
252 kfree(buffer.pointer);
253 return status;
254}
255
256
257
258/* acpi_run_oshp - get control of hotplug from the firmware 49/* acpi_run_oshp - get control of hotplug from the firmware
259 * 50 *
260 * @handle - the handle of the hotplug controller. 51 * @handle - the handle of the hotplug controller.
@@ -283,48 +74,6 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
283 return status; 74 return status;
284} 75}
285 76
286/* pci_get_hp_params
287 *
288 * @dev - the pci_dev for which we want parameters
289 * @hpp - allocated by the caller
290 */
291int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
292{
293 acpi_status status;
294 acpi_handle handle, phandle;
295 struct pci_bus *pbus;
296
297 handle = NULL;
298 for (pbus = dev->bus; pbus; pbus = pbus->parent) {
299 handle = acpi_pci_get_bridge_handle(pbus);
300 if (handle)
301 break;
302 }
303
304 /*
305 * _HPP settings apply to all child buses, until another _HPP is
306 * encountered. If we don't find an _HPP for the input pci dev,
307 * look for it in the parent device scope since that would apply to
308 * this pci dev.
309 */
310 while (handle) {
311 status = acpi_run_hpx(handle, hpp);
312 if (ACPI_SUCCESS(status))
313 return 0;
314 status = acpi_run_hpp(handle, hpp);
315 if (ACPI_SUCCESS(status))
316 return 0;
317 if (acpi_is_root_bridge(handle))
318 break;
319 status = acpi_get_parent(handle, &phandle);
320 if (ACPI_FAILURE(status))
321 break;
322 handle = phandle;
323 }
324 return -ENODEV;
325}
326EXPORT_SYMBOL_GPL(pci_get_hp_params);
327
328/** 77/**
329 * acpi_get_hp_hw_control_from_firmware 78 * acpi_get_hp_hw_control_from_firmware
330 * @dev: the pci_dev of the bridge that has a hotplug controller 79 * @dev: the pci_dev of the bridge that has a hotplug controller
@@ -433,7 +182,8 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
433{ 182{
434 acpi_handle bridge_handle, parent_handle; 183 acpi_handle bridge_handle, parent_handle;
435 184
436 if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) 185 bridge_handle = acpi_pci_get_bridge_handle(pbus);
186 if (!bridge_handle)
437 return 0; 187 return 0;
438 if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) 188 if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
439 return 0; 189 return 0;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 6cd5160fc057..bcb90e4888dd 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,7 +61,6 @@ static DEFINE_MUTEX(bridge_mutex);
61static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type); 61static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
62static void acpiphp_post_dock_fixup(struct acpi_device *adev); 62static void acpiphp_post_dock_fixup(struct acpi_device *adev);
63static void acpiphp_sanitize_bus(struct pci_bus *bus); 63static void acpiphp_sanitize_bus(struct pci_bus *bus);
64static void acpiphp_set_hpp_values(struct pci_bus *bus);
65static void hotplug_event(u32 type, struct acpiphp_context *context); 64static void hotplug_event(u32 type, struct acpiphp_context *context);
66static void free_bridge(struct kref *kref); 65static void free_bridge(struct kref *kref);
67 66
@@ -510,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
510 __pci_bus_assign_resources(bus, &add_list, NULL); 509 __pci_bus_assign_resources(bus, &add_list, NULL);
511 510
512 acpiphp_sanitize_bus(bus); 511 acpiphp_sanitize_bus(bus);
513 acpiphp_set_hpp_values(bus); 512 pcie_bus_configure_settings(bus);
514 acpiphp_set_acpi_region(slot); 513 acpiphp_set_acpi_region(slot);
515 514
516 list_for_each_entry(dev, &bus->devices, bus_list) { 515 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -698,14 +697,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
698 } 697 }
699} 698}
700 699
701static void acpiphp_set_hpp_values(struct pci_bus *bus)
702{
703 struct pci_dev *dev;
704
705 list_for_each_entry(dev, &bus->devices, bus_list)
706 pci_configure_slot(dev);
707}
708
709/* 700/*
710 * Remove devices for which we could not assign resources, call 701 * Remove devices for which we could not assign resources, call
711 * arch specific code to fix-up the bus 702 * arch specific code to fix-up the bus
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8dcccffd6e21..6ca23998ee8f 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -302,7 +302,7 @@ static int ibm_get_table_from_acpi(char **bufp)
302 goto read_table_done; 302 goto read_table_done;
303 } 303 }
304 304
305 for(size = 0, i = 0; i < package->package.count; i++) { 305 for (size = 0, i = 0; i < package->package.count; i++) {
306 if (package->package.elements[i].type != ACPI_TYPE_BUFFER) { 306 if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
307 pr_err("%s: Invalid APCI element %d\n", __func__, i); 307 pr_err("%s: Invalid APCI element %d\n", __func__, i);
308 goto read_table_done; 308 goto read_table_done;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index e09cf7827d68..a5a7fd8332ac 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -125,7 +125,8 @@ disable_slot(struct hotplug_slot *hotplug_slot)
125 125
126 /* Unconfigure device */ 126 /* Unconfigure device */
127 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot)); 127 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
128 if ((retval = cpci_unconfigure_slot(slot))) { 128 retval = cpci_unconfigure_slot(slot);
129 if (retval) {
129 err("%s - could not unconfigure slot %s", 130 err("%s - could not unconfigure slot %s",
130 __func__, slot_name(slot)); 131 __func__, slot_name(slot));
131 goto disable_error; 132 goto disable_error;
@@ -141,9 +142,11 @@ disable_slot(struct hotplug_slot *hotplug_slot)
141 } 142 }
142 cpci_led_on(slot); 143 cpci_led_on(slot);
143 144
144 if (controller->ops->set_power) 145 if (controller->ops->set_power) {
145 if ((retval = controller->ops->set_power(slot, 0))) 146 retval = controller->ops->set_power(slot, 0);
147 if (retval)
146 goto disable_error; 148 goto disable_error;
149 }
147 150
148 if (update_adapter_status(slot->hotplug_slot, 0)) 151 if (update_adapter_status(slot->hotplug_slot, 0))
149 warn("failure to update adapter file"); 152 warn("failure to update adapter file");
@@ -467,9 +470,9 @@ check_slots(void)
467 __func__, slot_name(slot), hs_csr); 470 __func__, slot_name(slot), hs_csr);
468 471
469 if (!slot->extracting) { 472 if (!slot->extracting) {
470 if (update_latch_status(slot->hotplug_slot, 0)) { 473 if (update_latch_status(slot->hotplug_slot, 0))
471 warn("failure to update latch file"); 474 warn("failure to update latch file");
472 } 475
473 slot->extracting = 1; 476 slot->extracting = 1;
474 atomic_inc(&extracting); 477 atomic_inc(&extracting);
475 } 478 }
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index 04fcd7811400..66b7bbebe493 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -56,7 +56,7 @@
56 if (debug) \ 56 if (debug) \
57 printk (KERN_DEBUG "%s: " format "\n", \ 57 printk (KERN_DEBUG "%s: " format "\n", \
58 MY_NAME , ## arg); \ 58 MY_NAME , ## arg); \
59 } while(0) 59 } while (0)
60#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 60#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
61#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 61#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
62#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) 62#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,28 +82,28 @@ static int __init validate_parameters(void)
82 char *p; 82 char *p;
83 unsigned long tmp; 83 unsigned long tmp;
84 84
85 if(!bridge) { 85 if (!bridge) {
86 info("not configured, disabling."); 86 info("not configured, disabling.");
87 return -EINVAL; 87 return -EINVAL;
88 } 88 }
89 str = bridge; 89 str = bridge;
90 if(!*str) 90 if (!*str)
91 return -EINVAL; 91 return -EINVAL;
92 92
93 tmp = simple_strtoul(str, &p, 16); 93 tmp = simple_strtoul(str, &p, 16);
94 if(p == str || tmp > 0xff) { 94 if (p == str || tmp > 0xff) {
95 err("Invalid hotplug bus bridge device bus number"); 95 err("Invalid hotplug bus bridge device bus number");
96 return -EINVAL; 96 return -EINVAL;
97 } 97 }
98 bridge_busnr = (u8) tmp; 98 bridge_busnr = (u8) tmp;
99 dbg("bridge_busnr = 0x%02x", bridge_busnr); 99 dbg("bridge_busnr = 0x%02x", bridge_busnr);
100 if(*p != ':') { 100 if (*p != ':') {
101 err("Invalid hotplug bus bridge device"); 101 err("Invalid hotplug bus bridge device");
102 return -EINVAL; 102 return -EINVAL;
103 } 103 }
104 str = p + 1; 104 str = p + 1;
105 tmp = simple_strtoul(str, &p, 16); 105 tmp = simple_strtoul(str, &p, 16);
106 if(p == str || tmp > 0x1f) { 106 if (p == str || tmp > 0x1f) {
107 err("Invalid hotplug bus bridge device slot number"); 107 err("Invalid hotplug bus bridge device slot number");
108 return -EINVAL; 108 return -EINVAL;
109 } 109 }
@@ -112,18 +112,18 @@ static int __init validate_parameters(void)
112 112
113 dbg("first_slot = 0x%02x", first_slot); 113 dbg("first_slot = 0x%02x", first_slot);
114 dbg("last_slot = 0x%02x", last_slot); 114 dbg("last_slot = 0x%02x", last_slot);
115 if(!(first_slot && last_slot)) { 115 if (!(first_slot && last_slot)) {
116 err("Need to specify first_slot and last_slot"); 116 err("Need to specify first_slot and last_slot");
117 return -EINVAL; 117 return -EINVAL;
118 } 118 }
119 if(last_slot < first_slot) { 119 if (last_slot < first_slot) {
120 err("first_slot must be less than last_slot"); 120 err("first_slot must be less than last_slot");
121 return -EINVAL; 121 return -EINVAL;
122 } 122 }
123 123
124 dbg("port = 0x%04x", port); 124 dbg("port = 0x%04x", port);
125 dbg("enum_bit = 0x%02x", enum_bit); 125 dbg("enum_bit = 0x%02x", enum_bit);
126 if(enum_bit > 7) { 126 if (enum_bit > 7) {
127 err("Invalid #ENUM bit"); 127 err("Invalid #ENUM bit");
128 return -EINVAL; 128 return -EINVAL;
129 } 129 }
@@ -151,12 +151,12 @@ static int __init cpcihp_generic_init(void)
151 return status; 151 return status;
152 152
153 r = request_region(port, 1, "#ENUM hotswap signal register"); 153 r = request_region(port, 1, "#ENUM hotswap signal register");
154 if(!r) 154 if (!r)
155 return -EBUSY; 155 return -EBUSY;
156 156
157 dev = pci_get_domain_bus_and_slot(0, bridge_busnr, 157 dev = pci_get_domain_bus_and_slot(0, bridge_busnr,
158 PCI_DEVFN(bridge_slot, 0)); 158 PCI_DEVFN(bridge_slot, 0));
159 if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { 159 if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
160 err("Invalid bridge device %s", bridge); 160 err("Invalid bridge device %s", bridge);
161 pci_dev_put(dev); 161 pci_dev_put(dev);
162 return -EINVAL; 162 return -EINVAL;
@@ -169,21 +169,21 @@ static int __init cpcihp_generic_init(void)
169 generic_hpc.ops = &generic_hpc_ops; 169 generic_hpc.ops = &generic_hpc_ops;
170 170
171 status = cpci_hp_register_controller(&generic_hpc); 171 status = cpci_hp_register_controller(&generic_hpc);
172 if(status != 0) { 172 if (status != 0) {
173 err("Could not register cPCI hotplug controller"); 173 err("Could not register cPCI hotplug controller");
174 return -ENODEV; 174 return -ENODEV;
175 } 175 }
176 dbg("registered controller"); 176 dbg("registered controller");
177 177
178 status = cpci_hp_register_bus(bus, first_slot, last_slot); 178 status = cpci_hp_register_bus(bus, first_slot, last_slot);
179 if(status != 0) { 179 if (status != 0) {
180 err("Could not register cPCI hotplug bus"); 180 err("Could not register cPCI hotplug bus");
181 goto init_bus_register_error; 181 goto init_bus_register_error;
182 } 182 }
183 dbg("registered bus"); 183 dbg("registered bus");
184 184
185 status = cpci_hp_start(); 185 status = cpci_hp_start();
186 if(status != 0) { 186 if (status != 0) {
187 err("Could not started cPCI hotplug system"); 187 err("Could not started cPCI hotplug system");
188 goto init_start_error; 188 goto init_start_error;
189 } 189 }
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 6757b3ef7e10..7ecf34e76a61 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -51,7 +51,7 @@
51 if (debug) \ 51 if (debug) \
52 printk (KERN_DEBUG "%s: " format "\n", \ 52 printk (KERN_DEBUG "%s: " format "\n", \
53 MY_NAME , ## arg); \ 53 MY_NAME , ## arg); \
54 } while(0) 54 } while (0)
55#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 55#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
56#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 56#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
57#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) 57#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,13 +82,13 @@ static int zt5550_hc_config(struct pci_dev *pdev)
82 int ret; 82 int ret;
83 83
84 /* Since we know that no boards exist with two HC chips, treat it as an error */ 84 /* Since we know that no boards exist with two HC chips, treat it as an error */
85 if(hc_dev) { 85 if (hc_dev) {
86 err("too many host controller devices?"); 86 err("too many host controller devices?");
87 return -EBUSY; 87 return -EBUSY;
88 } 88 }
89 89
90 ret = pci_enable_device(pdev); 90 ret = pci_enable_device(pdev);
91 if(ret) { 91 if (ret) {
92 err("cannot enable %s\n", pci_name(pdev)); 92 err("cannot enable %s\n", pci_name(pdev));
93 return ret; 93 return ret;
94 } 94 }
@@ -98,7 +98,7 @@ static int zt5550_hc_config(struct pci_dev *pdev)
98 dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1)); 98 dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1));
99 dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1)); 99 dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1));
100 100
101 if(!request_mem_region(pci_resource_start(hc_dev, 1), 101 if (!request_mem_region(pci_resource_start(hc_dev, 1),
102 pci_resource_len(hc_dev, 1), MY_NAME)) { 102 pci_resource_len(hc_dev, 1), MY_NAME)) {
103 err("cannot reserve MMIO region"); 103 err("cannot reserve MMIO region");
104 ret = -ENOMEM; 104 ret = -ENOMEM;
@@ -107,7 +107,7 @@ static int zt5550_hc_config(struct pci_dev *pdev)
107 107
108 hc_registers = 108 hc_registers =
109 ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); 109 ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1));
110 if(!hc_registers) { 110 if (!hc_registers) {
111 err("cannot remap MMIO region %llx @ %llx", 111 err("cannot remap MMIO region %llx @ %llx",
112 (unsigned long long)pci_resource_len(hc_dev, 1), 112 (unsigned long long)pci_resource_len(hc_dev, 1),
113 (unsigned long long)pci_resource_start(hc_dev, 1)); 113 (unsigned long long)pci_resource_start(hc_dev, 1));
@@ -146,7 +146,7 @@ exit_disable_device:
146 146
147static int zt5550_hc_cleanup(void) 147static int zt5550_hc_cleanup(void)
148{ 148{
149 if(!hc_dev) 149 if (!hc_dev)
150 return -ENODEV; 150 return -ENODEV;
151 151
152 iounmap(hc_registers); 152 iounmap(hc_registers);
@@ -170,9 +170,9 @@ static int zt5550_hc_check_irq(void *dev_id)
170 u8 reg; 170 u8 reg;
171 171
172 ret = 0; 172 ret = 0;
173 if(dev_id == zt5550_hpc.dev_id) { 173 if (dev_id == zt5550_hpc.dev_id) {
174 reg = readb(csr_int_status); 174 reg = readb(csr_int_status);
175 if(reg) 175 if (reg)
176 ret = 1; 176 ret = 1;
177 } 177 }
178 return ret; 178 return ret;
@@ -182,9 +182,9 @@ static int zt5550_hc_enable_irq(void)
182{ 182{
183 u8 reg; 183 u8 reg;
184 184
185 if(hc_dev == NULL) { 185 if (hc_dev == NULL)
186 return -ENODEV; 186 return -ENODEV;
187 } 187
188 reg = readb(csr_int_mask); 188 reg = readb(csr_int_mask);
189 reg = reg & ~ENUM_INT_MASK; 189 reg = reg & ~ENUM_INT_MASK;
190 writeb(reg, csr_int_mask); 190 writeb(reg, csr_int_mask);
@@ -195,9 +195,8 @@ static int zt5550_hc_disable_irq(void)
195{ 195{
196 u8 reg; 196 u8 reg;
197 197
198 if(hc_dev == NULL) { 198 if (hc_dev == NULL)
199 return -ENODEV; 199 return -ENODEV;
200 }
201 200
202 reg = readb(csr_int_mask); 201 reg = readb(csr_int_mask);
203 reg = reg | ENUM_INT_MASK; 202 reg = reg | ENUM_INT_MASK;
@@ -210,15 +209,15 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
210 int status; 209 int status;
211 210
212 status = zt5550_hc_config(pdev); 211 status = zt5550_hc_config(pdev);
213 if(status != 0) { 212 if (status != 0)
214 return status; 213 return status;
215 } 214
216 dbg("returned from zt5550_hc_config"); 215 dbg("returned from zt5550_hc_config");
217 216
218 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller)); 217 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
219 zt5550_hpc_ops.query_enum = zt5550_hc_query_enum; 218 zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
220 zt5550_hpc.ops = &zt5550_hpc_ops; 219 zt5550_hpc.ops = &zt5550_hpc_ops;
221 if(!poll) { 220 if (!poll) {
222 zt5550_hpc.irq = hc_dev->irq; 221 zt5550_hpc.irq = hc_dev->irq;
223 zt5550_hpc.irq_flags = IRQF_SHARED; 222 zt5550_hpc.irq_flags = IRQF_SHARED;
224 zt5550_hpc.dev_id = hc_dev; 223 zt5550_hpc.dev_id = hc_dev;
@@ -231,15 +230,16 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
231 } 230 }
232 231
233 status = cpci_hp_register_controller(&zt5550_hpc); 232 status = cpci_hp_register_controller(&zt5550_hpc);
234 if(status != 0) { 233 if (status != 0) {
235 err("could not register cPCI hotplug controller"); 234 err("could not register cPCI hotplug controller");
236 goto init_hc_error; 235 goto init_hc_error;
237 } 236 }
238 dbg("registered controller"); 237 dbg("registered controller");
239 238
240 /* Look for first device matching cPCI bus's bridge vendor and device IDs */ 239 /* Look for first device matching cPCI bus's bridge vendor and device IDs */
241 if(!(bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC, 240 bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
242 PCI_DEVICE_ID_DEC_21154, NULL))) { 241 PCI_DEVICE_ID_DEC_21154, NULL);
242 if (!bus0_dev) {
243 status = -ENODEV; 243 status = -ENODEV;
244 goto init_register_error; 244 goto init_register_error;
245 } 245 }
@@ -247,14 +247,14 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
247 pci_dev_put(bus0_dev); 247 pci_dev_put(bus0_dev);
248 248
249 status = cpci_hp_register_bus(bus0, 0x0a, 0x0f); 249 status = cpci_hp_register_bus(bus0, 0x0a, 0x0f);
250 if(status != 0) { 250 if (status != 0) {
251 err("could not register cPCI hotplug bus"); 251 err("could not register cPCI hotplug bus");
252 goto init_register_error; 252 goto init_register_error;
253 } 253 }
254 dbg("registered bus"); 254 dbg("registered bus");
255 255
256 status = cpci_hp_start(); 256 status = cpci_hp_start();
257 if(status != 0) { 257 if (status != 0) {
258 err("could not started cPCI hotplug system"); 258 err("could not started cPCI hotplug system");
259 cpci_hp_unregister_bus(bus0); 259 cpci_hp_unregister_bus(bus0);
260 goto init_register_error; 260 goto init_register_error;
@@ -300,11 +300,11 @@ static int __init zt5550_init(void)
300 300
301 info(DRIVER_DESC " version: " DRIVER_VERSION); 301 info(DRIVER_DESC " version: " DRIVER_VERSION);
302 r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register"); 302 r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register");
303 if(!r) 303 if (!r)
304 return -EBUSY; 304 return -EBUSY;
305 305
306 rc = pci_register_driver(&zt5550_hc_driver); 306 rc = pci_register_driver(&zt5550_hc_driver);
307 if(rc < 0) 307 if (rc < 0)
308 release_region(ENUM_PORT, 1); 308 release_region(ENUM_PORT, 1);
309 return rc; 309 return rc;
310} 310}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 0450f405807d..b28b2d2184cd 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -690,7 +690,7 @@ static inline int cpq_get_latch_status(struct controller *ctrl,
690 690
691 status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)); 691 status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot));
692 692
693 return(status == 0) ? 1 : 0; 693 return (status == 0) ? 1 : 0;
694} 694}
695 695
696 696
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4aaee746df88..a53084ddc118 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -1096,9 +1096,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1096 1096
1097 /* initialize our threads if they haven't already been started up */ 1097 /* initialize our threads if they haven't already been started up */
1098 rc = one_time_init(); 1098 rc = one_time_init();
1099 if (rc) { 1099 if (rc)
1100 goto err_free_bus; 1100 goto err_free_bus;
1101 }
1102 1101
1103 dbg("pdev = %p\n", pdev); 1102 dbg("pdev = %p\n", pdev);
1104 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0)); 1103 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index bde47fce3248..c5cbefee5236 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -705,9 +705,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
705 if (temp == max) { 705 if (temp == max) {
706 *head = max->next; 706 *head = max->next;
707 } else { 707 } else {
708 while (temp && temp->next != max) { 708 while (temp && temp->next != max)
709 temp = temp->next; 709 temp = temp->next;
710 }
711 710
712 if (temp) 711 if (temp)
713 temp->next = max->next; 712 temp->next = max->next;
@@ -903,9 +902,8 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
903 /* 902 /*
904 * Check to see if it was our interrupt 903 * Check to see if it was our interrupt
905 */ 904 */
906 if (!(misc & 0x000C)) { 905 if (!(misc & 0x000C))
907 return IRQ_NONE; 906 return IRQ_NONE;
908 }
909 907
910 if (misc & 0x0004) { 908 if (misc & 0x0004) {
911 /* 909 /*
@@ -1143,7 +1141,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1143 /* We don't allow freq/mode changes if we find another adapter running 1141 /* We don't allow freq/mode changes if we find another adapter running
1144 * in another slot on this controller 1142 * in another slot on this controller
1145 */ 1143 */
1146 for(slot = ctrl->slot; slot; slot = slot->next) { 1144 for (slot = ctrl->slot; slot; slot = slot->next) {
1147 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1145 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1148 continue; 1146 continue;
1149 if (!slot->hotplug_slot || !slot->hotplug_slot->info) 1147 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
@@ -1193,7 +1191,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1193 1191
1194 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); 1192 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
1195 reg16 &= ~0x000F; 1193 reg16 &= ~0x000F;
1196 switch(adapter_speed) { 1194 switch (adapter_speed) {
1197 case(PCI_SPEED_133MHz_PCIX): 1195 case(PCI_SPEED_133MHz_PCIX):
1198 reg = 0x75; 1196 reg = 0x75;
1199 reg16 |= 0xB; 1197 reg16 |= 0xB;
@@ -2006,9 +2004,8 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
2006 /* Check to see if the interlock is closed */ 2004 /* Check to see if the interlock is closed */
2007 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 2005 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
2008 2006
2009 if (tempdword & (0x01 << hp_slot)) { 2007 if (tempdword & (0x01 << hp_slot))
2010 return 1; 2008 return 1;
2011 }
2012 2009
2013 if (func->is_a_board) { 2010 if (func->is_a_board) {
2014 rc = board_replaced(func, ctrl); 2011 rc = board_replaced(func, ctrl);
@@ -2070,9 +2067,8 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
2070 } 2067 }
2071 } 2068 }
2072 2069
2073 if (rc) { 2070 if (rc)
2074 dbg("%s: rc = %d\n", __func__, rc); 2071 dbg("%s: rc = %d\n", __func__, rc);
2075 }
2076 2072
2077 if (p_slot) 2073 if (p_slot)
2078 update_slot_info(ctrl, p_slot); 2074 update_slot_info(ctrl, p_slot);
@@ -2095,9 +2091,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2095 device = func->device; 2091 device = func->device;
2096 func = cpqhp_slot_find(ctrl->bus, device, index++); 2092 func = cpqhp_slot_find(ctrl->bus, device, index++);
2097 p_slot = cpqhp_find_slot(ctrl, device); 2093 p_slot = cpqhp_find_slot(ctrl, device);
2098 if (p_slot) { 2094 if (p_slot)
2099 physical_slot = p_slot->number; 2095 physical_slot = p_slot->number;
2100 }
2101 2096
2102 /* Make sure there are no video controllers here */ 2097 /* Make sure there are no video controllers here */
2103 while (func && !rc) { 2098 while (func && !rc) {
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 0968a9bcb345..1e08ff8c229c 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -204,9 +204,8 @@ static int load_HRT (void __iomem *rom_start)
204 u8 temp_byte = 0xFF; 204 u8 temp_byte = 0xFF;
205 u32 rc; 205 u32 rc;
206 206
207 if (!check_for_compaq_ROM(rom_start)) { 207 if (!check_for_compaq_ROM(rom_start))
208 return -ENODEV; 208 return -ENODEV;
209 }
210 209
211 available = 1024; 210 available = 1024;
212 211
@@ -250,9 +249,8 @@ static u32 store_HRT (void __iomem *rom_start)
250 249
251 available = 1024; 250 available = 1024;
252 251
253 if (!check_for_compaq_ROM(rom_start)) { 252 if (!check_for_compaq_ROM(rom_start))
254 return(1); 253 return(1);
255 }
256 254
257 buffer = (u32*) evbuffer; 255 buffer = (u32*) evbuffer;
258 256
@@ -427,9 +425,9 @@ static u32 store_HRT (void __iomem *rom_start)
427 425
428void compaq_nvram_init (void __iomem *rom_start) 426void compaq_nvram_init (void __iomem *rom_start)
429{ 427{
430 if (rom_start) { 428 if (rom_start)
431 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); 429 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
432 } 430
433 dbg("int15 entry = %p\n", compaq_int15_entry_point); 431 dbg("int15 entry = %p\n", compaq_int15_entry_point);
434 432
435 /* initialize our int15 lock */ 433 /* initialize our int15 lock */
@@ -661,9 +659,8 @@ int compaq_nvram_store (void __iomem *rom_start)
661 659
662 if (evbuffer_init) { 660 if (evbuffer_init) {
663 rc = store_HRT(rom_start); 661 rc = store_HRT(rom_start);
664 if (rc) { 662 if (rc)
665 err(msg_unable_to_save); 663 err(msg_unable_to_save);
666 }
667 } 664 }
668 return rc; 665 return rc;
669} 666}
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index f7b8684a7739..3efaf4c38528 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1023,7 +1023,8 @@ static int enable_slot(struct hotplug_slot *hs)
1023 debug("ENABLING SLOT........\n"); 1023 debug("ENABLING SLOT........\n");
1024 slot_cur = hs->private; 1024 slot_cur = hs->private;
1025 1025
1026 if ((rc = validate(slot_cur, ENABLE))) { 1026 rc = validate(slot_cur, ENABLE);
1027 if (rc) {
1027 err("validate function failed\n"); 1028 err("validate function failed\n");
1028 goto error_nopower; 1029 goto error_nopower;
1029 } 1030 }
@@ -1199,9 +1200,8 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
1199 1200
1200 debug("DISABLING SLOT...\n"); 1201 debug("DISABLING SLOT...\n");
1201 1202
1202 if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { 1203 if ((slot_cur == NULL) || (slot_cur->ctrl == NULL))
1203 return -ENODEV; 1204 return -ENODEV;
1204 }
1205 1205
1206 flag = slot_cur->flag; 1206 flag = slot_cur->flag;
1207 slot_cur->flag = 1; 1207 slot_cur->flag = 1;
@@ -1336,17 +1336,20 @@ static int __init ibmphp_init(void)
1336 for (i = 0; i < 16; i++) 1336 for (i = 0; i < 16; i++)
1337 irqs[i] = 0; 1337 irqs[i] = 0;
1338 1338
1339 if ((rc = ibmphp_access_ebda())) 1339 rc = ibmphp_access_ebda();
1340 if (rc)
1340 goto error; 1341 goto error;
1341 debug("after ibmphp_access_ebda()\n"); 1342 debug("after ibmphp_access_ebda()\n");
1342 1343
1343 if ((rc = ibmphp_rsrc_init())) 1344 rc = ibmphp_rsrc_init();
1345 if (rc)
1344 goto error; 1346 goto error;
1345 debug("AFTER Resource & EBDA INITIALIZATIONS\n"); 1347 debug("AFTER Resource & EBDA INITIALIZATIONS\n");
1346 1348
1347 max_slots = get_max_slots(); 1349 max_slots = get_max_slots();
1348 1350
1349 if ((rc = ibmphp_register_pci())) 1351 rc = ibmphp_register_pci();
1352 if (rc)
1350 goto error; 1353 goto error;
1351 1354
1352 if (init_ops()) { 1355 if (init_ops()) {
@@ -1355,9 +1358,9 @@ static int __init ibmphp_init(void)
1355 } 1358 }
1356 1359
1357 ibmphp_print_test(); 1360 ibmphp_print_test();
1358 if ((rc = ibmphp_hpc_start_poll_thread())) { 1361 rc = ibmphp_hpc_start_poll_thread();
1362 if (rc)
1359 goto error; 1363 goto error;
1360 }
1361 1364
1362exit: 1365exit:
1363 return rc; 1366 return rc;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 0f65ac555434..d9b197d5c6b4 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -215,9 +215,8 @@ static void __init print_ebda_hpc (void)
215 debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap); 215 debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap);
216 } 216 }
217 217
218 for (index = 0; index < hpc_ptr->bus_count; index++) { 218 for (index = 0; index < hpc_ptr->bus_count; index++)
219 debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num); 219 debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num);
220 }
221 220
222 debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type); 221 debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type);
223 switch (hpc_ptr->ctlr_type) { 222 switch (hpc_ptr->ctlr_type) {
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a936022956e6..220876715a08 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -997,9 +997,8 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
997 rc = ibmphp_do_disable_slot (pslot); 997 rc = ibmphp_do_disable_slot (pslot);
998 } 998 }
999 999
1000 if (update || disable) { 1000 if (update || disable)
1001 ibmphp_update_slot_info (pslot); 1001 ibmphp_update_slot_info (pslot);
1002 }
1003 1002
1004 debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update); 1003 debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update);
1005 1004
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 2fd296706ce7..814cea22a9fa 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -145,7 +145,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
145 case PCI_HEADER_TYPE_NORMAL: 145 case PCI_HEADER_TYPE_NORMAL:
146 debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class); 146 debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class);
147 assign_alt_irq (cur_func, class_code); 147 assign_alt_irq (cur_func, class_code);
148 if ((rc = configure_device (cur_func)) < 0) { 148 rc = configure_device(cur_func);
149 if (rc < 0) {
149 /* We need to do this in case some other BARs were properly inserted */ 150 /* We need to do this in case some other BARs were properly inserted */
150 err ("was not able to configure devfunc %x on bus %x.\n", 151 err ("was not able to configure devfunc %x on bus %x.\n",
151 cur_func->device, cur_func->busno); 152 cur_func->device, cur_func->busno);
@@ -157,7 +158,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
157 break; 158 break;
158 case PCI_HEADER_TYPE_MULTIDEVICE: 159 case PCI_HEADER_TYPE_MULTIDEVICE:
159 assign_alt_irq (cur_func, class_code); 160 assign_alt_irq (cur_func, class_code);
160 if ((rc = configure_device (cur_func)) < 0) { 161 rc = configure_device(cur_func);
162 if (rc < 0) {
161 /* We need to do this in case some other BARs were properly inserted */ 163 /* We need to do this in case some other BARs were properly inserted */
162 err ("was not able to configure devfunc %x on bus %x...bailing out\n", 164 err ("was not able to configure devfunc %x on bus %x...bailing out\n",
163 cur_func->device, cur_func->busno); 165 cur_func->device, cur_func->busno);
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index f34745abd5b6..219ba8090a37 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -224,7 +224,8 @@ int __init ibmphp_rsrc_init (void)
224 if ((curr->rsrc_type & RESTYPE) == MMASK) { 224 if ((curr->rsrc_type & RESTYPE) == MMASK) {
225 /* no bus structure exists in place yet */ 225 /* no bus structure exists in place yet */
226 if (list_empty (&gbuses)) { 226 if (list_empty (&gbuses)) {
227 if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1))) 227 rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
228 if (rc)
228 return rc; 229 return rc;
229 list_add_tail (&newbus->bus_list, &gbuses); 230 list_add_tail (&newbus->bus_list, &gbuses);
230 debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 231 debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -237,7 +238,8 @@ int __init ibmphp_rsrc_init (void)
237 return rc; 238 return rc;
238 } else { 239 } else {
239 /* went through all the buses and didn't find ours, need to create a new bus node */ 240 /* went through all the buses and didn't find ours, need to create a new bus node */
240 if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1))) 241 rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
242 if (rc)
241 return rc; 243 return rc;
242 244
243 list_add_tail (&newbus->bus_list, &gbuses); 245 list_add_tail (&newbus->bus_list, &gbuses);
@@ -248,7 +250,8 @@ int __init ibmphp_rsrc_init (void)
248 /* prefetchable memory */ 250 /* prefetchable memory */
249 if (list_empty (&gbuses)) { 251 if (list_empty (&gbuses)) {
250 /* no bus structure exists in place yet */ 252 /* no bus structure exists in place yet */
251 if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1))) 253 rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
254 if (rc)
252 return rc; 255 return rc;
253 list_add_tail (&newbus->bus_list, &gbuses); 256 list_add_tail (&newbus->bus_list, &gbuses);
254 debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 257 debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -261,7 +264,8 @@ int __init ibmphp_rsrc_init (void)
261 return rc; 264 return rc;
262 } else { 265 } else {
263 /* went through all the buses and didn't find ours, need to create a new bus node */ 266 /* went through all the buses and didn't find ours, need to create a new bus node */
264 if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1))) 267 rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
268 if (rc)
265 return rc; 269 return rc;
266 list_add_tail (&newbus->bus_list, &gbuses); 270 list_add_tail (&newbus->bus_list, &gbuses);
267 debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 271 debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -271,7 +275,8 @@ int __init ibmphp_rsrc_init (void)
271 /* IO */ 275 /* IO */
272 if (list_empty (&gbuses)) { 276 if (list_empty (&gbuses)) {
273 /* no bus structure exists in place yet */ 277 /* no bus structure exists in place yet */
274 if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1))) 278 rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
279 if (rc)
275 return rc; 280 return rc;
276 list_add_tail (&newbus->bus_list, &gbuses); 281 list_add_tail (&newbus->bus_list, &gbuses);
277 debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 282 debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -283,7 +288,8 @@ int __init ibmphp_rsrc_init (void)
283 return rc; 288 return rc;
284 } else { 289 } else {
285 /* went through all the buses and didn't find ours, need to create a new bus node */ 290 /* went through all the buses and didn't find ours, need to create a new bus node */
286 if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1))) 291 rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
292 if (rc)
287 return rc; 293 return rc;
288 list_add_tail (&newbus->bus_list, &gbuses); 294 list_add_tail (&newbus->bus_list, &gbuses);
289 debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 295 debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -1038,7 +1044,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1038 /* found our range */ 1044 /* found our range */
1039 if (!res_prev) { 1045 if (!res_prev) {
1040 /* first time in the loop */ 1046 /* first time in the loop */
1041 if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { 1047 len_tmp = res_cur->start - 1 - range->start;
1048
1049 if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
1042 debug ("len_tmp = %x\n", len_tmp); 1050 debug ("len_tmp = %x\n", len_tmp);
1043 1051
1044 if ((len_tmp < len_cur) || (len_cur == 0)) { 1052 if ((len_tmp < len_cur) || (len_cur == 0)) {
@@ -1078,7 +1086,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1078 } 1086 }
1079 if (!res_cur->next) { 1087 if (!res_cur->next) {
1080 /* last device on the range */ 1088 /* last device on the range */
1081 if ((range->end != res_cur->end) && ((len_tmp = range->end - (res_cur->end + 1)) >= res->len)) { 1089 len_tmp = range->end - (res_cur->end + 1);
1090
1091 if ((range->end != res_cur->end) && (len_tmp >= res->len)) {
1082 debug ("len_tmp = %x\n", len_tmp); 1092 debug ("len_tmp = %x\n", len_tmp);
1083 if ((len_tmp < len_cur) || (len_cur == 0)) { 1093 if ((len_tmp < len_cur) || (len_cur == 0)) {
1084 1094
@@ -1117,8 +1127,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1117 if (res_prev) { 1127 if (res_prev) {
1118 if (res_prev->rangeno != res_cur->rangeno) { 1128 if (res_prev->rangeno != res_cur->rangeno) {
1119 /* 1st device on this range */ 1129 /* 1st device on this range */
1120 if ((res_cur->start != range->start) && 1130 len_tmp = res_cur->start - 1 - range->start;
1121 ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { 1131
1132 if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
1122 if ((len_tmp < len_cur) || (len_cur == 0)) { 1133 if ((len_tmp < len_cur) || (len_cur == 0)) {
1123 if ((range->start % tmp_divide) == 0) { 1134 if ((range->start % tmp_divide) == 0) {
1124 /* just perfect, starting address is divisible by length */ 1135 /* just perfect, starting address is divisible by length */
@@ -1153,7 +1164,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1153 } 1164 }
1154 } else { 1165 } else {
1155 /* in the same range */ 1166 /* in the same range */
1156 if ((len_tmp = res_cur->start - 1 - res_prev->end - 1) >= res->len) { 1167 len_tmp = res_cur->start - 1 - res_prev->end - 1;
1168
1169 if (len_tmp >= res->len) {
1157 if ((len_tmp < len_cur) || (len_cur == 0)) { 1170 if ((len_tmp < len_cur) || (len_cur == 0)) {
1158 if (((res_prev->end + 1) % tmp_divide) == 0) { 1171 if (((res_prev->end + 1) % tmp_divide) == 0) {
1159 /* just perfect, starting address's divisible by length */ 1172 /* just perfect, starting address's divisible by length */
@@ -1212,7 +1225,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1212 break; 1225 break;
1213 } 1226 }
1214 while (range) { 1227 while (range) {
1215 if ((len_tmp = range->end - range->start) >= res->len) { 1228 len_tmp = range->end - range->start;
1229
1230 if (len_tmp >= res->len) {
1216 if ((len_tmp < len_cur) || (len_cur == 0)) { 1231 if ((len_tmp < len_cur) || (len_cur == 0)) {
1217 if ((range->start % tmp_divide) == 0) { 1232 if ((range->start % tmp_divide) == 0) {
1218 /* just perfect, starting address's divisible by length */ 1233 /* just perfect, starting address's divisible by length */
@@ -1276,7 +1291,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1276 break; 1291 break;
1277 } 1292 }
1278 while (range) { 1293 while (range) {
1279 if ((len_tmp = range->end - range->start) >= res->len) { 1294 len_tmp = range->end - range->start;
1295
1296 if (len_tmp >= res->len) {
1280 if ((len_tmp < len_cur) || (len_cur == 0)) { 1297 if ((len_tmp < len_cur) || (len_cur == 0)) {
1281 if ((range->start % tmp_divide) == 0) { 1298 if ((range->start % tmp_divide) == 0) {
1282 /* just perfect, starting address's divisible by length */ 1299 /* just perfect, starting address's divisible by length */
@@ -1335,7 +1352,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1335 return -EINVAL; 1352 return -EINVAL;
1336 } 1353 }
1337 } 1354 }
1338 } /* end if(!res_cur) */ 1355 } /* end if (!res_cur) */
1339 return -EINVAL; 1356 return -EINVAL;
1340} 1357}
1341 1358
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 9e5a9fbb93d7..b11521953485 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -92,7 +92,7 @@ struct controller {
92 struct slot *slot; 92 struct slot *slot;
93 wait_queue_head_t queue; /* sleep & wake process */ 93 wait_queue_head_t queue; /* sleep & wake process */
94 u32 slot_cap; 94 u32 slot_cap;
95 u32 slot_ctrl; 95 u16 slot_ctrl;
96 struct timer_list poll_timer; 96 struct timer_list poll_timer;
97 unsigned long cmd_started; /* jiffies */ 97 unsigned long cmd_started; /* jiffies */
98 unsigned int cmd_busy:1; 98 unsigned int cmd_busy:1;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 07aa722bb12c..3a5e7e28b874 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -262,6 +262,13 @@ static int pciehp_probe(struct pcie_device *dev)
262 goto err_out_none; 262 goto err_out_none;
263 } 263 }
264 264
265 if (!dev->port->subordinate) {
266 /* Can happen if we run out of bus numbers during probe */
267 dev_err(&dev->device,
268 "Hotplug bridge without secondary bus, ignoring\n");
269 goto err_out_none;
270 }
271
265 ctrl = pcie_init(dev); 272 ctrl = pcie_init(dev);
266 if (!ctrl) { 273 if (!ctrl) {
267 dev_err(&dev->device, "Controller initialization failed\n"); 274 dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 2a412fa3b338..0ebf754fc177 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -171,9 +171,9 @@ static void pcie_wait_cmd(struct controller *ctrl)
171 * interrupts. 171 * interrupts.
172 */ 172 */
173 if (!rc) 173 if (!rc)
174 ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n", 174 ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
175 ctrl->slot_ctrl, 175 ctrl->slot_ctrl,
176 jiffies_to_msecs(now - ctrl->cmd_started)); 176 jiffies_to_msecs(jiffies - ctrl->cmd_started));
177} 177}
178 178
179/** 179/**
@@ -422,9 +422,9 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
422 default: 422 default:
423 return; 423 return;
424 } 424 }
425 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
425 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 426 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
426 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 427 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
427 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
428} 428}
429 429
430void pciehp_green_led_on(struct slot *slot) 430void pciehp_green_led_on(struct slot *slot)
@@ -614,6 +614,8 @@ void pcie_enable_notification(struct controller *ctrl)
614 PCI_EXP_SLTCTL_DLLSCE); 614 PCI_EXP_SLTCTL_DLLSCE);
615 615
616 pcie_write_cmd(ctrl, cmd, mask); 616 pcie_write_cmd(ctrl, cmd, mask);
617 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
618 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
617} 619}
618 620
619static void pcie_disable_notification(struct controller *ctrl) 621static void pcie_disable_notification(struct controller *ctrl)
@@ -625,6 +627,8 @@ static void pcie_disable_notification(struct controller *ctrl)
625 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | 627 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
626 PCI_EXP_SLTCTL_DLLSCE); 628 PCI_EXP_SLTCTL_DLLSCE);
627 pcie_write_cmd(ctrl, 0, mask); 629 pcie_write_cmd(ctrl, 0, mask);
630 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
631 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
628} 632}
629 633
630/* 634/*
@@ -652,6 +656,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
652 stat_mask |= PCI_EXP_SLTSTA_DLLSC; 656 stat_mask |= PCI_EXP_SLTSTA_DLLSC;
653 657
654 pcie_write_cmd(ctrl, 0, ctrl_mask); 658 pcie_write_cmd(ctrl, 0, ctrl_mask);
659 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
660 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
655 if (pciehp_poll_mode) 661 if (pciehp_poll_mode)
656 del_timer_sync(&ctrl->poll_timer); 662 del_timer_sync(&ctrl->poll_timer);
657 663
@@ -659,6 +665,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
659 665
660 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); 666 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
661 pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask); 667 pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
668 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
669 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
662 if (pciehp_poll_mode) 670 if (pciehp_poll_mode)
663 int_poll_timeout(ctrl->poll_timer.data); 671 int_poll_timeout(ctrl->poll_timer.data);
664 672
@@ -797,9 +805,6 @@ struct controller *pcie_init(struct pcie_device *dev)
797 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 805 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
798 PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); 806 PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
799 807
800 /* Disable software notification */
801 pcie_disable_notification(ctrl);
802
803 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n", 808 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
804 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, 809 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
805 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), 810 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5f871f4c4af1..9e69403be632 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -65,14 +65,7 @@ int pciehp_configure_device(struct slot *p_slot)
65 pci_hp_add_bridge(dev); 65 pci_hp_add_bridge(dev);
66 66
67 pci_assign_unassigned_bridge_resources(bridge); 67 pci_assign_unassigned_bridge_resources(bridge);
68 68 pcie_bus_configure_settings(parent);
69 list_for_each_entry(dev, &parent->devices, bus_list) {
70 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
71 continue;
72
73 pci_configure_slot(dev);
74 }
75
76 pci_bus_add_devices(parent); 69 pci_bus_add_devices(parent);
77 70
78 out: 71 out:
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
deleted file mode 100644
index 3e36ec8d708a..000000000000
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright (C) 1995,2001 Compaq Computer Corporation
3 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
4 * Copyright (C) 2001 IBM Corp.
5 * Copyright (C) 2003-2004 Intel Corporation
6 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/pci.h>
27#include <linux/export.h>
28#include <linux/pci_hotplug.h>
29
30static struct hpp_type0 pci_default_type0 = {
31 .revision = 1,
32 .cache_line_size = 8,
33 .latency_timer = 0x40,
34 .enable_serr = 0,
35 .enable_perr = 0,
36};
37
38static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
39{
40 u16 pci_cmd, pci_bctl;
41
42 if (!hpp) {
43 /*
44 * Perhaps we *should* use default settings for PCIe, but
45 * pciehp didn't, so we won't either.
46 */
47 if (pci_is_pcie(dev))
48 return;
49 hpp = &pci_default_type0;
50 }
51
52 if (hpp->revision > 1) {
53 dev_warn(&dev->dev,
54 "PCI settings rev %d not supported; using defaults\n",
55 hpp->revision);
56 hpp = &pci_default_type0;
57 }
58
59 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
60 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
61 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
62 if (hpp->enable_serr)
63 pci_cmd |= PCI_COMMAND_SERR;
64 else
65 pci_cmd &= ~PCI_COMMAND_SERR;
66 if (hpp->enable_perr)
67 pci_cmd |= PCI_COMMAND_PARITY;
68 else
69 pci_cmd &= ~PCI_COMMAND_PARITY;
70 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
71
72 /* Program bridge control value */
73 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
74 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
75 hpp->latency_timer);
76 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
77 if (hpp->enable_serr)
78 pci_bctl |= PCI_BRIDGE_CTL_SERR;
79 else
80 pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
81 if (hpp->enable_perr)
82 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
83 else
84 pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
85 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
86 }
87}
88
89static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
90{
91 if (hpp)
92 dev_warn(&dev->dev, "PCI-X settings not supported\n");
93}
94
95static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
96{
97 int pos;
98 u32 reg32;
99
100 if (!hpp)
101 return;
102
103 if (hpp->revision > 1) {
104 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
105 hpp->revision);
106 return;
107 }
108
109 /* Initialize Device Control Register */
110 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
111 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
112
113 /* Initialize Link Control Register */
114 if (dev->subordinate)
115 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
116 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
117
118 /* Find Advanced Error Reporting Enhanced Capability */
119 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
120 if (!pos)
121 return;
122
123 /* Initialize Uncorrectable Error Mask Register */
124 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
125 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
126 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
127
128 /* Initialize Uncorrectable Error Severity Register */
129 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
130 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
131 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
132
133 /* Initialize Correctable Error Mask Register */
134 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
135 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
136 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
137
138 /* Initialize Advanced Error Capabilities and Control Register */
139 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
140 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
141 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
142
143 /*
144 * FIXME: The following two registers are not supported yet.
145 *
146 * o Secondary Uncorrectable Error Severity Register
147 * o Secondary Uncorrectable Error Mask Register
148 */
149}
150
151void pci_configure_slot(struct pci_dev *dev)
152{
153 struct pci_dev *cdev;
154 struct hotplug_params hpp;
155
156 if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
157 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
158 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
159 return;
160
161 pcie_bus_configure_settings(dev->bus);
162
163 memset(&hpp, 0, sizeof(hpp));
164 pci_get_hp_params(dev, &hpp);
165
166 program_hpp_type2(dev, hpp.t2);
167 program_hpp_type1(dev, hpp.t1);
168 program_hpp_type0(dev, hpp.t0);
169
170 if (dev->subordinate) {
171 list_for_each_entry(cdev, &dev->subordinate->devices,
172 bus_list)
173 pci_configure_slot(cdev);
174 }
175}
176EXPORT_SYMBOL_GPL(pci_configure_slot);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index a81fb67ea9a1..10c7927599b3 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -195,7 +195,8 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
195 int rc = 0; 195 int rc = 0;
196 196
197 ctrl_dbg(ctrl, "Change speed to %d\n", speed); 197 ctrl_dbg(ctrl, "Change speed to %d\n", speed);
198 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { 198 rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
199 if (rc) {
199 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", 200 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
200 __func__); 201 __func__);
201 return WRONG_BUS_FREQUENCY; 202 return WRONG_BUS_FREQUENCY;
@@ -261,14 +262,16 @@ static int board_added(struct slot *p_slot)
261 } 262 }
262 263
263 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { 264 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
264 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { 265 rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
266 if (rc) {
265 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", 267 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
266 __func__); 268 __func__);
267 return WRONG_BUS_FREQUENCY; 269 return WRONG_BUS_FREQUENCY;
268 } 270 }
269 271
270 /* turn on board, blink green LED, turn off Amber LED */ 272 /* turn on board, blink green LED, turn off Amber LED */
271 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 273 rc = p_slot->hpc_ops->slot_enable(p_slot);
274 if (rc) {
272 ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); 275 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
273 return rc; 276 return rc;
274 } 277 }
@@ -296,7 +299,8 @@ static int board_added(struct slot *p_slot)
296 return rc; 299 return rc;
297 300
298 /* turn on board, blink green LED, turn off Amber LED */ 301 /* turn on board, blink green LED, turn off Amber LED */
299 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 302 rc = p_slot->hpc_ops->slot_enable(p_slot);
303 if (rc) {
300 ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); 304 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
301 return rc; 305 return rc;
302 } 306 }
@@ -595,7 +599,7 @@ static int shpchp_enable_slot (struct slot *p_slot)
595 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); 599 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
596 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 600 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
597 601
598 if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || 602 if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
599 (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)) 603 (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
600 && p_slot->ctrl->num_slots == 1) { 604 && p_slot->ctrl->num_slots == 1) {
601 /* handle amd pogo errata; this must be done before enable */ 605 /* handle amd pogo errata; this must be done before enable */
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 29e22352822c..7d223e9080ef 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -466,7 +466,8 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
466 u8 m66_cap = !!(slot_reg & MHZ66_CAP); 466 u8 m66_cap = !!(slot_reg & MHZ66_CAP);
467 u8 pi, pcix_cap; 467 u8 pi, pcix_cap;
468 468
469 if ((retval = hpc_get_prog_int(slot, &pi))) 469 retval = hpc_get_prog_int(slot, &pi);
470 if (retval)
470 return retval; 471 return retval;
471 472
472 switch (pi) { 473 switch (pi) {
@@ -798,7 +799,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
798 799
799 ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc); 800 ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
800 801
801 if(!shpchp_poll_mode) { 802 if (!shpchp_poll_mode) {
802 /* 803 /*
803 * Mask Global Interrupt Mask - see implementation 804 * Mask Global Interrupt Mask - see implementation
804 * note on p. 139 of SHPC spec rev 1.0 805 * note on p. 139 of SHPC spec rev 1.0
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 469454e0cc48..f8cd3a27e351 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -69,13 +69,7 @@ int shpchp_configure_device(struct slot *p_slot)
69 } 69 }
70 70
71 pci_assign_unassigned_bridge_resources(bridge); 71 pci_assign_unassigned_bridge_resources(bridge);
72 72 pcie_bus_configure_settings(parent);
73 list_for_each_entry(dev, &parent->devices, bus_list) {
74 if (PCI_SLOT(dev->devfn) != p_slot->device)
75 continue;
76 pci_configure_slot(dev);
77 }
78
79 pci_bus_add_devices(parent); 73 pci_bus_add_devices(parent);
80 74
81 out: 75 out:
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index cb6f24740ee3..4d109c07294a 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -633,7 +633,7 @@ int pci_vfs_assigned(struct pci_dev *dev)
633 * our dev as the physical function and the assigned bit is set 633 * our dev as the physical function and the assigned bit is set
634 */ 634 */
635 if (vfdev->is_virtfn && (vfdev->physfn == dev) && 635 if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
636 (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) 636 pci_is_dev_assigned(vfdev))
637 vfs_assigned++; 637 vfs_assigned++;
638 638
639 vfdev = pci_get_device(dev->vendor, dev_id, vfdev); 639 vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 5a40516444f3..2f7c92c4757a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -56,16 +56,6 @@ void __weak arch_teardown_msi_irq(unsigned int irq)
56 chip->teardown_irq(chip, irq); 56 chip->teardown_irq(chip, irq);
57} 57}
58 58
59int __weak arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
60{
61 struct msi_chip *chip = dev->bus->msi;
62
63 if (!chip || !chip->check_device)
64 return 0;
65
66 return chip->check_device(chip, dev, nvec, type);
67}
68
69int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 59int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
70{ 60{
71 struct msi_desc *entry; 61 struct msi_desc *entry;
@@ -130,7 +120,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
130 } 120 }
131 121
132 if (entry) 122 if (entry)
133 write_msi_msg(irq, &entry->msg); 123 __write_msi_msg(entry, &entry->msg);
134} 124}
135 125
136void __weak arch_restore_msi_irqs(struct pci_dev *dev) 126void __weak arch_restore_msi_irqs(struct pci_dev *dev)
@@ -384,17 +374,6 @@ static void free_msi_irqs(struct pci_dev *dev)
384 iounmap(entry->mask_base); 374 iounmap(entry->mask_base);
385 } 375 }
386 376
387 /*
388 * Its possible that we get into this path
389 * When populate_msi_sysfs fails, which means the entries
390 * were not registered with sysfs. In that case don't
391 * unregister them.
392 */
393 if (entry->kobj.parent) {
394 kobject_del(&entry->kobj);
395 kobject_put(&entry->kobj);
396 }
397
398 list_del(&entry->list); 377 list_del(&entry->list);
399 kfree(entry); 378 kfree(entry);
400 } 379 }
@@ -595,7 +574,6 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
595 entry->msi_attrib.entry_nr = 0; 574 entry->msi_attrib.entry_nr = 0;
596 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); 575 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
597 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 576 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
598 entry->msi_attrib.pos = dev->msi_cap;
599 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; 577 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
600 578
601 if (control & PCI_MSI_FLAGS_64BIT) 579 if (control & PCI_MSI_FLAGS_64BIT)
@@ -699,7 +677,6 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
699 entry->msi_attrib.is_64 = 1; 677 entry->msi_attrib.is_64 = 1;
700 entry->msi_attrib.entry_nr = entries[i].entry; 678 entry->msi_attrib.entry_nr = entries[i].entry;
701 entry->msi_attrib.default_irq = dev->irq; 679 entry->msi_attrib.default_irq = dev->irq;
702 entry->msi_attrib.pos = dev->msix_cap;
703 entry->mask_base = base; 680 entry->mask_base = base;
704 681
705 list_add_tail(&entry->list, &dev->msi_list); 682 list_add_tail(&entry->list, &dev->msi_list);
@@ -806,23 +783,24 @@ out_free:
806} 783}
807 784
808/** 785/**
809 * pci_msi_check_device - check whether MSI may be enabled on a device 786 * pci_msi_supported - check whether MSI may be enabled on a device
810 * @dev: pointer to the pci_dev data structure of MSI device function 787 * @dev: pointer to the pci_dev data structure of MSI device function
811 * @nvec: how many MSIs have been requested ? 788 * @nvec: how many MSIs have been requested ?
812 * @type: are we checking for MSI or MSI-X ?
813 * 789 *
814 * Look at global flags, the device itself, and its parent buses 790 * Look at global flags, the device itself, and its parent buses
815 * to determine if MSI/-X are supported for the device. If MSI/-X is 791 * to determine if MSI/-X are supported for the device. If MSI/-X is
816 * supported return 0, else return an error code. 792 * supported return 1, else return 0.
817 **/ 793 **/
818static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type) 794static int pci_msi_supported(struct pci_dev *dev, int nvec)
819{ 795{
820 struct pci_bus *bus; 796 struct pci_bus *bus;
821 int ret;
822 797
823 /* MSI must be globally enabled and supported by the device */ 798 /* MSI must be globally enabled and supported by the device */
824 if (!pci_msi_enable || !dev || dev->no_msi) 799 if (!pci_msi_enable)
825 return -EINVAL; 800 return 0;
801
802 if (!dev || dev->no_msi || dev->current_state != PCI_D0)
803 return 0;
826 804
827 /* 805 /*
828 * You can't ask to have 0 or less MSIs configured. 806 * You can't ask to have 0 or less MSIs configured.
@@ -830,7 +808,7 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
830 * b) the list manipulation code assumes nvec >= 1. 808 * b) the list manipulation code assumes nvec >= 1.
831 */ 809 */
832 if (nvec < 1) 810 if (nvec < 1)
833 return -ERANGE; 811 return 0;
834 812
835 /* 813 /*
836 * Any bridge which does NOT route MSI transactions from its 814 * Any bridge which does NOT route MSI transactions from its
@@ -841,13 +819,9 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
841 */ 819 */
842 for (bus = dev->bus; bus; bus = bus->parent) 820 for (bus = dev->bus; bus; bus = bus->parent)
843 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) 821 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
844 return -EINVAL; 822 return 0;
845
846 ret = arch_msi_check_device(dev, nvec, type);
847 if (ret)
848 return ret;
849 823
850 return 0; 824 return 1;
851} 825}
852 826
853/** 827/**
@@ -946,15 +920,14 @@ EXPORT_SYMBOL(pci_msix_vec_count);
946 **/ 920 **/
947int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) 921int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
948{ 922{
949 int status, nr_entries; 923 int nr_entries;
950 int i, j; 924 int i, j;
951 925
952 if (!entries || !dev->msix_cap || dev->current_state != PCI_D0) 926 if (!pci_msi_supported(dev, nvec))
953 return -EINVAL; 927 return -EINVAL;
954 928
955 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); 929 if (!entries)
956 if (status) 930 return -EINVAL;
957 return status;
958 931
959 nr_entries = pci_msix_vec_count(dev); 932 nr_entries = pci_msix_vec_count(dev);
960 if (nr_entries < 0) 933 if (nr_entries < 0)
@@ -978,8 +951,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
978 dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); 951 dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
979 return -EINVAL; 952 return -EINVAL;
980 } 953 }
981 status = msix_capability_init(dev, entries, nvec); 954 return msix_capability_init(dev, entries, nvec);
982 return status;
983} 955}
984EXPORT_SYMBOL(pci_enable_msix); 956EXPORT_SYMBOL(pci_enable_msix);
985 957
@@ -1062,7 +1034,7 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
1062 int nvec; 1034 int nvec;
1063 int rc; 1035 int rc;
1064 1036
1065 if (dev->current_state != PCI_D0) 1037 if (!pci_msi_supported(dev, minvec))
1066 return -EINVAL; 1038 return -EINVAL;
1067 1039
1068 WARN_ON(!!dev->msi_enabled); 1040 WARN_ON(!!dev->msi_enabled);
@@ -1086,17 +1058,6 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
1086 nvec = maxvec; 1058 nvec = maxvec;
1087 1059
1088 do { 1060 do {
1089 rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
1090 if (rc < 0) {
1091 return rc;
1092 } else if (rc > 0) {
1093 if (rc < minvec)
1094 return -ENOSPC;
1095 nvec = rc;
1096 }
1097 } while (rc);
1098
1099 do {
1100 rc = msi_capability_init(dev, nvec); 1061 rc = msi_capability_init(dev, nvec);
1101 if (rc < 0) { 1062 if (rc < 0) {
1102 return rc; 1063 return rc;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 37263b0ebfe3..6ebf8edc5f3c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -10,6 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/pci_hotplug.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/pci-aspm.h> 15#include <linux/pci-aspm.h>
15#include <linux/pci-acpi.h> 16#include <linux/pci-acpi.h>
@@ -17,6 +18,267 @@
17#include <linux/pm_qos.h> 18#include <linux/pm_qos.h>
18#include "pci.h" 19#include "pci.h"
19 20
21phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
22{
23 acpi_status status = AE_NOT_EXIST;
24 unsigned long long mcfg_addr;
25
26 if (handle)
27 status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
28 NULL, &mcfg_addr);
29 if (ACPI_FAILURE(status))
30 return 0;
31
32 return (phys_addr_t)mcfg_addr;
33}
34
35static acpi_status decode_type0_hpx_record(union acpi_object *record,
36 struct hotplug_params *hpx)
37{
38 int i;
39 union acpi_object *fields = record->package.elements;
40 u32 revision = fields[1].integer.value;
41
42 switch (revision) {
43 case 1:
44 if (record->package.count != 6)
45 return AE_ERROR;
46 for (i = 2; i < 6; i++)
47 if (fields[i].type != ACPI_TYPE_INTEGER)
48 return AE_ERROR;
49 hpx->t0 = &hpx->type0_data;
50 hpx->t0->revision = revision;
51 hpx->t0->cache_line_size = fields[2].integer.value;
52 hpx->t0->latency_timer = fields[3].integer.value;
53 hpx->t0->enable_serr = fields[4].integer.value;
54 hpx->t0->enable_perr = fields[5].integer.value;
55 break;
56 default:
57 printk(KERN_WARNING
58 "%s: Type 0 Revision %d record not supported\n",
59 __func__, revision);
60 return AE_ERROR;
61 }
62 return AE_OK;
63}
64
65static acpi_status decode_type1_hpx_record(union acpi_object *record,
66 struct hotplug_params *hpx)
67{
68 int i;
69 union acpi_object *fields = record->package.elements;
70 u32 revision = fields[1].integer.value;
71
72 switch (revision) {
73 case 1:
74 if (record->package.count != 5)
75 return AE_ERROR;
76 for (i = 2; i < 5; i++)
77 if (fields[i].type != ACPI_TYPE_INTEGER)
78 return AE_ERROR;
79 hpx->t1 = &hpx->type1_data;
80 hpx->t1->revision = revision;
81 hpx->t1->max_mem_read = fields[2].integer.value;
82 hpx->t1->avg_max_split = fields[3].integer.value;
83 hpx->t1->tot_max_split = fields[4].integer.value;
84 break;
85 default:
86 printk(KERN_WARNING
87 "%s: Type 1 Revision %d record not supported\n",
88 __func__, revision);
89 return AE_ERROR;
90 }
91 return AE_OK;
92}
93
94static acpi_status decode_type2_hpx_record(union acpi_object *record,
95 struct hotplug_params *hpx)
96{
97 int i;
98 union acpi_object *fields = record->package.elements;
99 u32 revision = fields[1].integer.value;
100
101 switch (revision) {
102 case 1:
103 if (record->package.count != 18)
104 return AE_ERROR;
105 for (i = 2; i < 18; i++)
106 if (fields[i].type != ACPI_TYPE_INTEGER)
107 return AE_ERROR;
108 hpx->t2 = &hpx->type2_data;
109 hpx->t2->revision = revision;
110 hpx->t2->unc_err_mask_and = fields[2].integer.value;
111 hpx->t2->unc_err_mask_or = fields[3].integer.value;
112 hpx->t2->unc_err_sever_and = fields[4].integer.value;
113 hpx->t2->unc_err_sever_or = fields[5].integer.value;
114 hpx->t2->cor_err_mask_and = fields[6].integer.value;
115 hpx->t2->cor_err_mask_or = fields[7].integer.value;
116 hpx->t2->adv_err_cap_and = fields[8].integer.value;
117 hpx->t2->adv_err_cap_or = fields[9].integer.value;
118 hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
119 hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
120 hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
121 hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
122 hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
123 hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
124 hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
125 hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
126 break;
127 default:
128 printk(KERN_WARNING
129 "%s: Type 2 Revision %d record not supported\n",
130 __func__, revision);
131 return AE_ERROR;
132 }
133 return AE_OK;
134}
135
136static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
137{
138 acpi_status status;
139 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
140 union acpi_object *package, *record, *fields;
141 u32 type;
142 int i;
143
144 /* Clear the return buffer with zeros */
145 memset(hpx, 0, sizeof(struct hotplug_params));
146
147 status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
148 if (ACPI_FAILURE(status))
149 return status;
150
151 package = (union acpi_object *)buffer.pointer;
152 if (package->type != ACPI_TYPE_PACKAGE) {
153 status = AE_ERROR;
154 goto exit;
155 }
156
157 for (i = 0; i < package->package.count; i++) {
158 record = &package->package.elements[i];
159 if (record->type != ACPI_TYPE_PACKAGE) {
160 status = AE_ERROR;
161 goto exit;
162 }
163
164 fields = record->package.elements;
165 if (fields[0].type != ACPI_TYPE_INTEGER ||
166 fields[1].type != ACPI_TYPE_INTEGER) {
167 status = AE_ERROR;
168 goto exit;
169 }
170
171 type = fields[0].integer.value;
172 switch (type) {
173 case 0:
174 status = decode_type0_hpx_record(record, hpx);
175 if (ACPI_FAILURE(status))
176 goto exit;
177 break;
178 case 1:
179 status = decode_type1_hpx_record(record, hpx);
180 if (ACPI_FAILURE(status))
181 goto exit;
182 break;
183 case 2:
184 status = decode_type2_hpx_record(record, hpx);
185 if (ACPI_FAILURE(status))
186 goto exit;
187 break;
188 default:
189 printk(KERN_ERR "%s: Type %d record not supported\n",
190 __func__, type);
191 status = AE_ERROR;
192 goto exit;
193 }
194 }
195 exit:
196 kfree(buffer.pointer);
197 return status;
198}
199
200static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
201{
202 acpi_status status;
203 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
204 union acpi_object *package, *fields;
205 int i;
206
207 memset(hpp, 0, sizeof(struct hotplug_params));
208
209 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
210 if (ACPI_FAILURE(status))
211 return status;
212
213 package = (union acpi_object *) buffer.pointer;
214 if (package->type != ACPI_TYPE_PACKAGE ||
215 package->package.count != 4) {
216 status = AE_ERROR;
217 goto exit;
218 }
219
220 fields = package->package.elements;
221 for (i = 0; i < 4; i++) {
222 if (fields[i].type != ACPI_TYPE_INTEGER) {
223 status = AE_ERROR;
224 goto exit;
225 }
226 }
227
228 hpp->t0 = &hpp->type0_data;
229 hpp->t0->revision = 1;
230 hpp->t0->cache_line_size = fields[0].integer.value;
231 hpp->t0->latency_timer = fields[1].integer.value;
232 hpp->t0->enable_serr = fields[2].integer.value;
233 hpp->t0->enable_perr = fields[3].integer.value;
234
235exit:
236 kfree(buffer.pointer);
237 return status;
238}
239
240/* pci_get_hp_params
241 *
242 * @dev - the pci_dev for which we want parameters
243 * @hpp - allocated by the caller
244 */
245int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
246{
247 acpi_status status;
248 acpi_handle handle, phandle;
249 struct pci_bus *pbus;
250
251 handle = NULL;
252 for (pbus = dev->bus; pbus; pbus = pbus->parent) {
253 handle = acpi_pci_get_bridge_handle(pbus);
254 if (handle)
255 break;
256 }
257
258 /*
259 * _HPP settings apply to all child buses, until another _HPP is
260 * encountered. If we don't find an _HPP for the input pci dev,
261 * look for it in the parent device scope since that would apply to
262 * this pci dev.
263 */
264 while (handle) {
265 status = acpi_run_hpx(handle, hpp);
266 if (ACPI_SUCCESS(status))
267 return 0;
268 status = acpi_run_hpp(handle, hpp);
269 if (ACPI_SUCCESS(status))
270 return 0;
271 if (acpi_is_root_bridge(handle))
272 break;
273 status = acpi_get_parent(handle, &phandle);
274 if (ACPI_FAILURE(status))
275 break;
276 handle = phandle;
277 }
278 return -ENODEV;
279}
280EXPORT_SYMBOL_GPL(pci_get_hp_params);
281
20/** 282/**
21 * pci_acpi_wake_bus - Root bus wakeup notification fork function. 283 * pci_acpi_wake_bus - Root bus wakeup notification fork function.
22 * @work: Work item to handle. 284 * @work: Work item to handle.
@@ -84,20 +346,6 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
84 return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev); 346 return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
85} 347}
86 348
87phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
88{
89 acpi_status status = AE_NOT_EXIST;
90 unsigned long long mcfg_addr;
91
92 if (handle)
93 status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
94 NULL, &mcfg_addr);
95 if (ACPI_FAILURE(status))
96 return 0;
97
98 return (phys_addr_t)mcfg_addr;
99}
100
101/* 349/*
102 * _SxD returns the D-state with the highest power 350 * _SxD returns the D-state with the highest power
103 * (lowest D-state number) supported in the S-state "x". 351 * (lowest D-state number) supported in the S-state "x".
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d04c5adafc16..2b3c89425bb5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -55,7 +55,6 @@ int pci_add_dynid(struct pci_driver *drv,
55 unsigned long driver_data) 55 unsigned long driver_data)
56{ 56{
57 struct pci_dynid *dynid; 57 struct pci_dynid *dynid;
58 int retval;
59 58
60 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 59 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
61 if (!dynid) 60 if (!dynid)
@@ -73,9 +72,7 @@ int pci_add_dynid(struct pci_driver *drv,
73 list_add_tail(&dynid->node, &drv->dynids.list); 72 list_add_tail(&dynid->node, &drv->dynids.list);
74 spin_unlock(&drv->dynids.lock); 73 spin_unlock(&drv->dynids.lock);
75 74
76 retval = driver_attach(&drv->driver); 75 return driver_attach(&drv->driver);
77
78 return retval;
79} 76}
80EXPORT_SYMBOL_GPL(pci_add_dynid); 77EXPORT_SYMBOL_GPL(pci_add_dynid);
81 78
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ff0a901ecf7..92b6d9ab00e4 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -177,7 +177,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
177{ 177{
178 struct pci_dev *pci_dev = to_pci_dev(dev); 178 struct pci_dev *pci_dev = to_pci_dev(dev);
179 179
180 return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n", 180 return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
181 pci_dev->vendor, pci_dev->device, 181 pci_dev->vendor, pci_dev->device,
182 pci_dev->subsystem_vendor, pci_dev->subsystem_device, 182 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
183 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), 183 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
@@ -250,46 +250,45 @@ static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
250 char *buf) 250 char *buf)
251{ 251{
252 struct pci_dev *pdev = to_pci_dev(dev); 252 struct pci_dev *pdev = to_pci_dev(dev);
253 struct pci_bus *subordinate = pdev->subordinate;
253 254
254 if (!pdev->subordinate) 255 return sprintf(buf, "%u\n", subordinate ?
255 return 0; 256 !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
256 257 : !pdev->no_msi);
257 return sprintf(buf, "%u\n",
258 !(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI));
259} 258}
260 259
261static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr, 260static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
262 const char *buf, size_t count) 261 const char *buf, size_t count)
263{ 262{
264 struct pci_dev *pdev = to_pci_dev(dev); 263 struct pci_dev *pdev = to_pci_dev(dev);
264 struct pci_bus *subordinate = pdev->subordinate;
265 unsigned long val; 265 unsigned long val;
266 266
267 if (kstrtoul(buf, 0, &val) < 0) 267 if (kstrtoul(buf, 0, &val) < 0)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 /*
271 * Bad things may happen if the no_msi flag is changed
272 * while drivers are loaded.
273 */
274 if (!capable(CAP_SYS_ADMIN)) 270 if (!capable(CAP_SYS_ADMIN))
275 return -EPERM; 271 return -EPERM;
276 272
277 /* 273 /*
278 * Maybe devices without subordinate buses shouldn't have this 274 * "no_msi" and "bus_flags" only affect what happens when a driver
279 * attribute in the first place? 275 * requests MSI or MSI-X. They don't affect any drivers that have
276 * already requested MSI or MSI-X.
280 */ 277 */
281 if (!pdev->subordinate) 278 if (!subordinate) {
279 pdev->no_msi = !val;
280 dev_info(&pdev->dev, "MSI/MSI-X %s for future drivers\n",
281 val ? "allowed" : "disallowed");
282 return count; 282 return count;
283
284 /* Is the flag going to change, or keep the value it already had? */
285 if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^
286 !!val) {
287 pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;
288
289 dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI, bad things could happen\n",
290 val ? "" : " not");
291 } 283 }
292 284
285 if (val)
286 subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
287 else
288 subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
289
290 dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
291 val ? "allowed" : "disallowed");
293 return count; 292 return count;
294} 293}
295static DEVICE_ATTR_RW(msi_bus); 294static DEVICE_ATTR_RW(msi_bus);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2c9ac70254e2..625a4ace10b4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1003,12 +1003,19 @@ int pci_save_state(struct pci_dev *dev)
1003 for (i = 0; i < 16; i++) 1003 for (i = 0; i < 16; i++)
1004 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); 1004 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1005 dev->state_saved = true; 1005 dev->state_saved = true;
1006 if ((i = pci_save_pcie_state(dev)) != 0) 1006
1007 i = pci_save_pcie_state(dev);
1008 if (i != 0)
1007 return i; 1009 return i;
1008 if ((i = pci_save_pcix_state(dev)) != 0) 1010
1011 i = pci_save_pcix_state(dev);
1012 if (i != 0)
1009 return i; 1013 return i;
1010 if ((i = pci_save_vc_state(dev)) != 0) 1014
1015 i = pci_save_vc_state(dev);
1016 if (i != 0)
1011 return i; 1017 return i;
1018
1012 return 0; 1019 return 0;
1013} 1020}
1014EXPORT_SYMBOL(pci_save_state); 1021EXPORT_SYMBOL(pci_save_state);
@@ -1907,10 +1914,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1907 if (target_state == PCI_POWER_ERROR) 1914 if (target_state == PCI_POWER_ERROR)
1908 return -EIO; 1915 return -EIO;
1909 1916
1910 /* D3cold during system suspend/hibernate is not supported */
1911 if (target_state > PCI_D3hot)
1912 target_state = PCI_D3hot;
1913
1914 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 1917 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1915 1918
1916 error = pci_set_power_state(dev, target_state); 1919 error = pci_set_power_state(dev, target_state);
@@ -2704,6 +2707,37 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2704} 2707}
2705EXPORT_SYMBOL(pci_request_regions_exclusive); 2708EXPORT_SYMBOL(pci_request_regions_exclusive);
2706 2709
2710/**
2711 * pci_remap_iospace - Remap the memory mapped I/O space
2712 * @res: Resource describing the I/O space
2713 * @phys_addr: physical address of range to be mapped
2714 *
2715 * Remap the memory mapped I/O space described by the @res
2716 * and the CPU physical address @phys_addr into virtual address space.
2717 * Only architectures that have memory mapped IO functions defined
2718 * (and the PCI_IOBASE value defined) should call this function.
2719 */
2720int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
2721{
2722#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
2723 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
2724
2725 if (!(res->flags & IORESOURCE_IO))
2726 return -EINVAL;
2727
2728 if (res->end > IO_SPACE_LIMIT)
2729 return -EINVAL;
2730
2731 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
2732 pgprot_device(PAGE_KERNEL));
2733#else
2734 /* this architecture does not have memory mapped I/O space,
2735 so this function should never be called */
2736 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
2737 return -ENODEV;
2738#endif
2739}
2740
2707static void __pci_set_master(struct pci_dev *dev, bool enable) 2741static void __pci_set_master(struct pci_dev *dev, bool enable)
2708{ 2742{
2709 u16 old_cmd, cmd; 2743 u16 old_cmd, cmd;
@@ -4406,6 +4440,15 @@ static void pci_no_domains(void)
4406#endif 4440#endif
4407} 4441}
4408 4442
4443#ifdef CONFIG_PCI_DOMAINS
4444static atomic_t __domain_nr = ATOMIC_INIT(-1);
4445
4446int pci_get_new_domain_nr(void)
4447{
4448 return atomic_inc_return(&__domain_nr);
4449}
4450#endif
4451
4409/** 4452/**
4410 * pci_ext_cfg_avail - can we access extended PCI config space? 4453 * pci_ext_cfg_avail - can we access extended PCI config space?
4411 * 4454 *
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 35d06e177917..c6849d9e86ce 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -89,15 +89,17 @@ static const char *aer_correctable_error_string[] = {
89 NULL, 89 NULL,
90 "Replay Timer Timeout", /* Bit Position 12 */ 90 "Replay Timer Timeout", /* Bit Position 12 */
91 "Advisory Non-Fatal", /* Bit Position 13 */ 91 "Advisory Non-Fatal", /* Bit Position 13 */
92 "Corrected Internal Error", /* Bit Position 14 */
93 "Header Log Overflow", /* Bit Position 15 */
92}; 94};
93 95
94static const char *aer_uncorrectable_error_string[] = { 96static const char *aer_uncorrectable_error_string[] = {
95 NULL, 97 "Undefined", /* Bit Position 0 */
96 NULL, 98 NULL,
97 NULL, 99 NULL,
98 NULL, 100 NULL,
99 "Data Link Protocol", /* Bit Position 4 */ 101 "Data Link Protocol", /* Bit Position 4 */
100 NULL, 102 "Surprise Down Error", /* Bit Position 5 */
101 NULL, 103 NULL,
102 NULL, 104 NULL,
103 NULL, 105 NULL,
@@ -113,6 +115,11 @@ static const char *aer_uncorrectable_error_string[] = {
113 "Malformed TLP", /* Bit Position 18 */ 115 "Malformed TLP", /* Bit Position 18 */
114 "ECRC", /* Bit Position 19 */ 116 "ECRC", /* Bit Position 19 */
115 "Unsupported Request", /* Bit Position 20 */ 117 "Unsupported Request", /* Bit Position 20 */
118 "ACS Violation", /* Bit Position 21 */
119 "Uncorrectable Internal Error", /* Bit Position 22 */
120 "MC Blocked TLP", /* Bit Position 23 */
121 "AtomicOp Egress Blocked", /* Bit Position 24 */
122 "TLP Prefix Blocked Error", /* Bit Position 25 */
116}; 123};
117 124
118static const char *aer_agent_string[] = { 125static const char *aer_agent_string[] = {
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 2ccc9b926ea7..be35da2e105e 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -93,77 +93,6 @@ static int pcie_port_resume_noirq(struct device *dev)
93 return 0; 93 return 0;
94} 94}
95 95
96#ifdef CONFIG_PM_RUNTIME
97struct d3cold_info {
98 bool no_d3cold;
99 unsigned int d3cold_delay;
100};
101
102static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
103{
104 struct d3cold_info *info = data;
105
106 info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
107 info->d3cold_delay);
108 if (pdev->no_d3cold)
109 info->no_d3cold = true;
110 return 0;
111}
112
113static int pcie_port_runtime_suspend(struct device *dev)
114{
115 struct pci_dev *pdev = to_pci_dev(dev);
116 struct d3cold_info d3cold_info = {
117 .no_d3cold = false,
118 .d3cold_delay = PCI_PM_D3_WAIT,
119 };
120
121 /*
122 * If any subordinate device disable D3cold, we should not put
123 * the port into D3cold. The D3cold delay of port should be
124 * the max of that of all subordinate devices.
125 */
126 pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
127 pdev->no_d3cold = d3cold_info.no_d3cold;
128 pdev->d3cold_delay = d3cold_info.d3cold_delay;
129 return 0;
130}
131
132static int pcie_port_runtime_resume(struct device *dev)
133{
134 return 0;
135}
136
137static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
138{
139 bool *pme_poll = data;
140
141 if (pdev->pme_poll)
142 *pme_poll = true;
143 return 0;
144}
145
146static int pcie_port_runtime_idle(struct device *dev)
147{
148 struct pci_dev *pdev = to_pci_dev(dev);
149 bool pme_poll = false;
150
151 /*
152 * If any subordinate device needs pme poll, we should keep
153 * the port in D0, because we need port in D0 to poll it.
154 */
155 pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
156 /* Delay for a short while to prevent too frequent suspend/resume */
157 if (!pme_poll)
158 pm_schedule_suspend(dev, 10);
159 return -EBUSY;
160}
161#else
162#define pcie_port_runtime_suspend NULL
163#define pcie_port_runtime_resume NULL
164#define pcie_port_runtime_idle NULL
165#endif
166
167static const struct dev_pm_ops pcie_portdrv_pm_ops = { 96static const struct dev_pm_ops pcie_portdrv_pm_ops = {
168 .suspend = pcie_port_device_suspend, 97 .suspend = pcie_port_device_suspend,
169 .resume = pcie_port_device_resume, 98 .resume = pcie_port_device_resume,
@@ -172,9 +101,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
172 .poweroff = pcie_port_device_suspend, 101 .poweroff = pcie_port_device_suspend,
173 .restore = pcie_port_device_resume, 102 .restore = pcie_port_device_resume,
174 .resume_noirq = pcie_port_resume_noirq, 103 .resume_noirq = pcie_port_resume_noirq,
175 .runtime_suspend = pcie_port_runtime_suspend,
176 .runtime_resume = pcie_port_runtime_resume,
177 .runtime_idle = pcie_port_runtime_idle,
178}; 104};
179 105
180#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) 106#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4170113cde61..5ed99309c758 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -6,6 +6,7 @@
6#include <linux/delay.h> 6#include <linux/delay.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/pci_hotplug.h>
9#include <linux/slab.h> 10#include <linux/slab.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/cpumask.h> 12#include <linux/cpumask.h>
@@ -485,7 +486,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
485 } 486 }
486} 487}
487 488
488static struct pci_bus *pci_alloc_bus(void) 489static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
489{ 490{
490 struct pci_bus *b; 491 struct pci_bus *b;
491 492
@@ -500,6 +501,10 @@ static struct pci_bus *pci_alloc_bus(void)
500 INIT_LIST_HEAD(&b->resources); 501 INIT_LIST_HEAD(&b->resources);
501 b->max_bus_speed = PCI_SPEED_UNKNOWN; 502 b->max_bus_speed = PCI_SPEED_UNKNOWN;
502 b->cur_bus_speed = PCI_SPEED_UNKNOWN; 503 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
504#ifdef CONFIG_PCI_DOMAINS_GENERIC
505 if (parent)
506 b->domain_nr = parent->domain_nr;
507#endif
503 return b; 508 return b;
504} 509}
505 510
@@ -671,7 +676,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
671 /* 676 /*
672 * Allocate a new bus, and inherit stuff from the parent.. 677 * Allocate a new bus, and inherit stuff from the parent..
673 */ 678 */
674 child = pci_alloc_bus(); 679 child = pci_alloc_bus(parent);
675 if (!child) 680 if (!child)
676 return NULL; 681 return NULL;
677 682
@@ -740,6 +745,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
740} 745}
741EXPORT_SYMBOL(pci_add_new_bus); 746EXPORT_SYMBOL(pci_add_new_bus);
742 747
748static void pci_enable_crs(struct pci_dev *pdev)
749{
750 u16 root_cap = 0;
751
752 /* Enable CRS Software Visibility if supported */
753 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
754 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
755 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
756 PCI_EXP_RTCTL_CRSSVE);
757}
758
743/* 759/*
744 * If it's a bridge, configure it and scan the bus behind it. 760 * If it's a bridge, configure it and scan the bus behind it.
745 * For CardBus bridges, we don't scan behind as the devices will 761 * For CardBus bridges, we don't scan behind as the devices will
@@ -787,6 +803,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
787 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 803 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
788 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 804 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
789 805
806 pci_enable_crs(dev);
807
790 if ((secondary || subordinate) && !pcibios_assign_all_busses() && 808 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
791 !is_cardbus && !broken) { 809 !is_cardbus && !broken) {
792 unsigned int cmax; 810 unsigned int cmax;
@@ -1226,6 +1244,137 @@ int pci_setup_device(struct pci_dev *dev)
1226 return 0; 1244 return 0;
1227} 1245}
1228 1246
1247static struct hpp_type0 pci_default_type0 = {
1248 .revision = 1,
1249 .cache_line_size = 8,
1250 .latency_timer = 0x40,
1251 .enable_serr = 0,
1252 .enable_perr = 0,
1253};
1254
1255static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1256{
1257 u16 pci_cmd, pci_bctl;
1258
1259 if (!hpp)
1260 hpp = &pci_default_type0;
1261
1262 if (hpp->revision > 1) {
1263 dev_warn(&dev->dev,
1264 "PCI settings rev %d not supported; using defaults\n",
1265 hpp->revision);
1266 hpp = &pci_default_type0;
1267 }
1268
1269 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1270 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1271 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1272 if (hpp->enable_serr)
1273 pci_cmd |= PCI_COMMAND_SERR;
1274 if (hpp->enable_perr)
1275 pci_cmd |= PCI_COMMAND_PARITY;
1276 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1277
1278 /* Program bridge control value */
1279 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1280 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1281 hpp->latency_timer);
1282 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1283 if (hpp->enable_serr)
1284 pci_bctl |= PCI_BRIDGE_CTL_SERR;
1285 if (hpp->enable_perr)
1286 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1287 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1288 }
1289}
1290
1291static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1292{
1293 if (hpp)
1294 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1295}
1296
1297static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1298{
1299 int pos;
1300 u32 reg32;
1301
1302 if (!hpp)
1303 return;
1304
1305 if (hpp->revision > 1) {
1306 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1307 hpp->revision);
1308 return;
1309 }
1310
1311 /*
1312 * Don't allow _HPX to change MPS or MRRS settings. We manage
1313 * those to make sure they're consistent with the rest of the
1314 * platform.
1315 */
1316 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1317 PCI_EXP_DEVCTL_READRQ;
1318 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1319 PCI_EXP_DEVCTL_READRQ);
1320
1321 /* Initialize Device Control Register */
1322 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1323 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1324
1325 /* Initialize Link Control Register */
1326 if (dev->subordinate)
1327 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1328 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1329
1330 /* Find Advanced Error Reporting Enhanced Capability */
1331 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1332 if (!pos)
1333 return;
1334
1335 /* Initialize Uncorrectable Error Mask Register */
1336 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1337 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1338 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1339
1340 /* Initialize Uncorrectable Error Severity Register */
1341 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1342 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1343 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1344
1345 /* Initialize Correctable Error Mask Register */
1346 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1347 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1348 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1349
1350 /* Initialize Advanced Error Capabilities and Control Register */
1351 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1352 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1353 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1354
1355 /*
1356 * FIXME: The following two registers are not supported yet.
1357 *
1358 * o Secondary Uncorrectable Error Severity Register
1359 * o Secondary Uncorrectable Error Mask Register
1360 */
1361}
1362
1363static void pci_configure_device(struct pci_dev *dev)
1364{
1365 struct hotplug_params hpp;
1366 int ret;
1367
1368 memset(&hpp, 0, sizeof(hpp));
1369 ret = pci_get_hp_params(dev, &hpp);
1370 if (ret)
1371 return;
1372
1373 program_hpp_type2(dev, hpp.t2);
1374 program_hpp_type1(dev, hpp.t1);
1375 program_hpp_type0(dev, hpp.t0);
1376}
1377
1229static void pci_release_capabilities(struct pci_dev *dev) 1378static void pci_release_capabilities(struct pci_dev *dev)
1230{ 1379{
1231 pci_vpd_release(dev); 1380 pci_vpd_release(dev);
@@ -1282,8 +1431,13 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1282 *l == 0x0000ffff || *l == 0xffff0000) 1431 *l == 0x0000ffff || *l == 0xffff0000)
1283 return false; 1432 return false;
1284 1433
1285 /* Configuration request Retry Status */ 1434 /*
1286 while (*l == 0xffff0001) { 1435 * Configuration Request Retry Status. Some root ports return the
1436 * actual device ID instead of the synthetic ID (0xFFFF) required
1437 * by the PCIe spec. Ignore the device ID and only check for
1438 * (vendor id == 1).
1439 */
1440 while ((*l & 0xffff) == 0x0001) {
1287 if (!crs_timeout) 1441 if (!crs_timeout)
1288 return false; 1442 return false;
1289 1443
@@ -1363,6 +1517,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1363{ 1517{
1364 int ret; 1518 int ret;
1365 1519
1520 pci_configure_device(dev);
1521
1366 device_initialize(&dev->dev); 1522 device_initialize(&dev->dev);
1367 dev->dev.release = pci_release_dev; 1523 dev->dev.release = pci_release_dev;
1368 1524
@@ -1751,13 +1907,14 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1751 char bus_addr[64]; 1907 char bus_addr[64];
1752 char *fmt; 1908 char *fmt;
1753 1909
1754 b = pci_alloc_bus(); 1910 b = pci_alloc_bus(NULL);
1755 if (!b) 1911 if (!b)
1756 return NULL; 1912 return NULL;
1757 1913
1758 b->sysdata = sysdata; 1914 b->sysdata = sysdata;
1759 b->ops = ops; 1915 b->ops = ops;
1760 b->number = b->busn_res.start = bus; 1916 b->number = b->busn_res.start = bus;
1917 pci_bus_assign_domain_nr(b, parent);
1761 b2 = pci_find_bus(pci_domain_nr(b), bus); 1918 b2 = pci_find_bus(pci_domain_nr(b), bus);
1762 if (b2) { 1919 if (b2) {
1763 /* If we already got to this bus through a different bridge, ignore it */ 1920 /* If we already got to this bus through a different bridge, ignore it */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 80c2d014283d..90acb32c85b1 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/ktime.h> 26#include <linux/ktime.h>
27#include <linux/mm.h>
27#include <asm/dma.h> /* isa_dma_bridge_buggy */ 28#include <asm/dma.h> /* isa_dma_bridge_buggy */
28#include "pci.h" 29#include "pci.h"
29 30
@@ -287,6 +288,25 @@ static void quirk_citrine(struct pci_dev *dev)
287} 288}
288DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); 289DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
289 290
291/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
292static void quirk_extend_bar_to_page(struct pci_dev *dev)
293{
294 int i;
295
296 for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
297 struct resource *r = &dev->resource[i];
298
299 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
300 r->end = PAGE_SIZE - 1;
301 r->start = 0;
302 r->flags |= IORESOURCE_UNSET;
303 dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
304 i, r);
305 }
306 }
307}
308DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
309
290/* 310/*
291 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. 311 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
292 * If it's needed, re-allocate the region. 312 * If it's needed, re-allocate the region.
@@ -2985,6 +3005,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
2985 */ 3005 */
2986DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, 3006DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
2987 quirk_broken_intx_masking); 3007 quirk_broken_intx_masking);
3008DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3009 quirk_broken_intx_masking);
2988 3010
2989#ifdef CONFIG_ACPI 3011#ifdef CONFIG_ACPI
2990/* 3012/*
@@ -3512,57 +3534,6 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
3512/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ 3534/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
3513DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); 3535DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
3514 3536
3515static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
3516{
3517 if (!PCI_FUNC(dev->devfn))
3518 return pci_dev_get(dev);
3519
3520 return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
3521}
3522
3523static const struct pci_dev_dma_source {
3524 u16 vendor;
3525 u16 device;
3526 struct pci_dev *(*dma_source)(struct pci_dev *dev);
3527} pci_dev_dma_source[] = {
3528 /*
3529 * https://bugzilla.redhat.com/show_bug.cgi?id=605888
3530 *
3531 * Some Ricoh devices use the function 0 source ID for DMA on
3532 * other functions of a multifunction device. The DMA devices
3533 * is therefore function 0, which will have implications of the
3534 * iommu grouping of these devices.
3535 */
3536 { PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
3537 { PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
3538 { PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
3539 { PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
3540 { 0 }
3541};
3542
3543/*
3544 * IOMMUs with isolation capabilities need to be programmed with the
3545 * correct source ID of a device. In most cases, the source ID matches
3546 * the device doing the DMA, but sometimes hardware is broken and will
3547 * tag the DMA as being sourced from a different device. This function
3548 * allows that translation. Note that the reference count of the
3549 * returned device is incremented on all paths.
3550 */
3551struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
3552{
3553 const struct pci_dev_dma_source *i;
3554
3555 for (i = pci_dev_dma_source; i->dma_source; i++) {
3556 if ((i->vendor == dev->vendor ||
3557 i->vendor == (u16)PCI_ANY_ID) &&
3558 (i->device == dev->device ||
3559 i->device == (u16)PCI_ANY_ID))
3560 return i->dma_source(dev);
3561 }
3562
3563 return pci_dev_get(dev);
3564}
3565
3566/* 3537/*
3567 * AMD has indicated that the devices below do not support peer-to-peer 3538 * AMD has indicated that the devices below do not support peer-to-peer
3568 * in any system where they are found in the southbridge with an AMD 3539 * in any system where they are found in the southbridge with an AMD
@@ -3582,6 +3553,11 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
3582 * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller 3553 * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
3583 * 1002:4384 SBx00 PCI to PCI Bridge 3554 * 1002:4384 SBx00 PCI to PCI Bridge
3584 * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller 3555 * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
3556 *
3557 * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
3558 *
3559 * 1022:780f [AMD] FCH PCI Bridge
3560 * 1022:7809 [AMD] FCH USB OHCI Controller
3585 */ 3561 */
3586static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) 3562static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
3587{ 3563{
@@ -3664,6 +3640,23 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
3664 return acs_flags & ~flags ? 0 : 1; 3640 return acs_flags & ~flags ? 0 : 1;
3665} 3641}
3666 3642
3643static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
3644{
3645 /*
3646 * SV, TB, and UF are not relevant to multifunction endpoints.
3647 *
3648 * Multifunction devices are only required to implement RR, CR, and DT
3649 * in their ACS capability if they support peer-to-peer transactions.
3650 * Devices matching this quirk have been verified by the vendor to not
3651 * perform peer-to-peer with other functions, allowing us to mask out
3652 * these bits as if they were unimplemented in the ACS capability.
3653 */
3654 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
3655 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
3656
3657 return acs_flags ? 0 : 1;
3658}
3659
3667static const struct pci_dev_acs_enabled { 3660static const struct pci_dev_acs_enabled {
3668 u16 vendor; 3661 u16 vendor;
3669 u16 device; 3662 u16 device;
@@ -3675,6 +3668,30 @@ static const struct pci_dev_acs_enabled {
3675 { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs }, 3668 { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
3676 { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs }, 3669 { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
3677 { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs }, 3670 { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
3671 { PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
3672 { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
3673 { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
3674 { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
3675 { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
3676 { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
3677 { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
3678 { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
3679 { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
3680 { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
3681 { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
3682 { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
3683 { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
3684 { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
3685 { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
3686 { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
3687 { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
3688 { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
3689 { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
3690 { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
3691 { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
3692 { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
3693 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
3694 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
3678 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, 3695 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
3679 { 0 } 3696 { 0 }
3680}; 3697};
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 827ad831f1dd..a81f413083e4 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -103,40 +103,6 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
103 return ret; 103 return ret;
104} 104}
105 105
106/*
107 * find the upstream PCIe-to-PCI bridge of a PCI device
108 * if the device is PCIE, return NULL
109 * if the device isn't connected to a PCIe bridge (that is its parent is a
110 * legacy PCI bridge and the bridge is directly connected to bus 0), return its
111 * parent
112 */
113struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
114{
115 struct pci_dev *tmp = NULL;
116
117 if (pci_is_pcie(pdev))
118 return NULL;
119 while (1) {
120 if (pci_is_root_bus(pdev->bus))
121 break;
122 pdev = pdev->bus->self;
123 /* a p2p bridge */
124 if (!pci_is_pcie(pdev)) {
125 tmp = pdev;
126 continue;
127 }
128 /* PCI device should connect to a PCIe bridge */
129 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) {
130 /* Busted hardware? */
131 WARN_ON_ONCE(1);
132 return NULL;
133 }
134 return pdev;
135 }
136
137 return tmp;
138}
139
140static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr) 106static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
141{ 107{
142 struct pci_bus *child; 108 struct pci_bus *child;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6373985ad3f7..0482235eee92 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1652,7 +1652,7 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
1652 struct pci_dev_resource *fail_res; 1652 struct pci_dev_resource *fail_res;
1653 int retval; 1653 int retval;
1654 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | 1654 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
1655 IORESOURCE_PREFETCH; 1655 IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
1656 1656
1657again: 1657again:
1658 __pci_bus_size_bridges(parent, &add_list); 1658 __pci_bus_size_bridges(parent, &add_list);
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index ce4588851274..ee16f0c5c47d 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -32,7 +32,6 @@
32 32
33#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ 33#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
34 34
35#define PCI_VENDOR_ID_VMWARE 0x15AD
36#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0 35#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
37 36
38/* 37/*
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e50790e91f76..1de3f94aa7de 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -727,7 +727,7 @@ static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
727 p_setd(perm, 0, ALL_VIRT, NO_WRITE); 727 p_setd(perm, 0, ALL_VIRT, NO_WRITE);
728 728
729 /* Writable bits mask */ 729 /* Writable bits mask */
730 mask = PCI_ERR_UNC_TRAIN | /* Training */ 730 mask = PCI_ERR_UNC_UND | /* Undefined */
731 PCI_ERR_UNC_DLP | /* Data Link Protocol */ 731 PCI_ERR_UNC_DLP | /* Data Link Protocol */
732 PCI_ERR_UNC_SURPDN | /* Surprise Down */ 732 PCI_ERR_UNC_SURPDN | /* Surprise Down */
733 PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */ 733 PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 259ba2661543..017069a455d4 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -133,7 +133,7 @@ static void pcistub_device_release(struct kref *kref)
133 xen_pcibk_config_free_dyn_fields(dev); 133 xen_pcibk_config_free_dyn_fields(dev);
134 xen_pcibk_config_free_dev(dev); 134 xen_pcibk_config_free_dev(dev);
135 135
136 dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 136 pci_clear_dev_assigned(dev);
137 pci_dev_put(dev); 137 pci_dev_put(dev);
138 138
139 kfree(psdev); 139 kfree(psdev);
@@ -413,7 +413,7 @@ static int pcistub_init_device(struct pci_dev *dev)
413 dev_dbg(&dev->dev, "reset device\n"); 413 dev_dbg(&dev->dev, "reset device\n");
414 xen_pcibk_reset_device(dev); 414 xen_pcibk_reset_device(dev);
415 415
416 dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 416 pci_set_dev_assigned(dev);
417 return 0; 417 return 0;
418 418
419config_release: 419config_release:
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 975e1cc75edb..b8fdc57a7335 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -331,7 +331,7 @@ static inline void iounmap(void __iomem *addr)
331#ifndef CONFIG_GENERIC_IOMAP 331#ifndef CONFIG_GENERIC_IOMAP
332static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 332static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
333{ 333{
334 return (void __iomem *) port; 334 return PCI_IOBASE + (port & IO_SPACE_LIMIT);
335} 335}
336 336
337static inline void ioport_unmap(void __iomem *p) 337static inline void ioport_unmap(void __iomem *p)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 53b2acc38213..977e545a64c3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -249,6 +249,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
249#define pgprot_writecombine pgprot_noncached 249#define pgprot_writecombine pgprot_noncached
250#endif 250#endif
251 251
252#ifndef pgprot_device
253#define pgprot_device pgprot_noncached
254#endif
255
252/* 256/*
253 * When walking page tables, get the address of the next boundary, 257 * When walking page tables, get the address of the next boundary,
254 * or the end address of the range if that comes earlier. Although no 258 * or the end address of the range if that comes earlier. Although no
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c28f9c..4fef65e57023 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
7#ifndef _AER_H_ 7#ifndef _AER_H_
8#define _AER_H_ 8#define _AER_H_
9 9
10#include <linux/types.h>
11
10#define AER_NONFATAL 0 12#define AER_NONFATAL 0
11#define AER_FATAL 1 13#define AER_FATAL 1
12#define AER_CORRECTABLE 2 14#define AER_CORRECTABLE 2
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec544167c..2c5250222278 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,
215 215
216/* Wrappers for managed devices */ 216/* Wrappers for managed devices */
217struct device; 217struct device;
218
219extern int devm_request_resource(struct device *dev, struct resource *root,
220 struct resource *new);
221extern void devm_release_resource(struct device *dev, struct resource *new);
222
218#define devm_request_region(dev,start,n,name) \ 223#define devm_request_region(dev,start,n,name) \
219 __devm_request_region(dev, &ioport_resource, (start), (n), (name)) 224 __devm_request_region(dev, &ioport_resource, (start), (n), (name))
220#define devm_request_mem_region(dev,start,n,name) \ 225#define devm_request_mem_region(dev,start,n,name) \
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8103f32f6d87..44f4746d033b 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -29,7 +29,6 @@ struct msi_desc {
29 __u8 multi_cap : 3; /* log2 num of messages supported */ 29 __u8 multi_cap : 3; /* log2 num of messages supported */
30 __u8 maskbit : 1; /* mask-pending bit supported ? */ 30 __u8 maskbit : 1; /* mask-pending bit supported ? */
31 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 31 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
32 __u8 pos; /* Location of the msi capability */
33 __u16 entry_nr; /* specific enabled entry */ 32 __u16 entry_nr; /* specific enabled entry */
34 unsigned default_irq; /* default pre-assigned irq */ 33 unsigned default_irq; /* default pre-assigned irq */
35 } msi_attrib; 34 } msi_attrib;
@@ -47,8 +46,6 @@ struct msi_desc {
47 46
48 /* Last set MSI message */ 47 /* Last set MSI message */
49 struct msi_msg msg; 48 struct msi_msg msg;
50
51 struct kobject kobj;
52}; 49};
53 50
54/* 51/*
@@ -60,7 +57,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
60void arch_teardown_msi_irq(unsigned int irq); 57void arch_teardown_msi_irq(unsigned int irq);
61int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 58int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
62void arch_teardown_msi_irqs(struct pci_dev *dev); 59void arch_teardown_msi_irqs(struct pci_dev *dev);
63int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
64void arch_restore_msi_irqs(struct pci_dev *dev); 60void arch_restore_msi_irqs(struct pci_dev *dev);
65 61
66void default_teardown_msi_irqs(struct pci_dev *dev); 62void default_teardown_msi_irqs(struct pci_dev *dev);
@@ -77,8 +73,6 @@ struct msi_chip {
77 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, 73 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
78 struct msi_desc *desc); 74 struct msi_desc *desc);
79 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); 75 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
80 int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
81 int nvec, int type);
82}; 76};
83 77
84#endif /* LINUX_MSI_H */ 78#endif /* LINUX_MSI_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index fb7b7221e063..8cb14eb393d6 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -23,17 +23,6 @@ struct of_pci_range {
23#define for_each_of_pci_range(parser, range) \ 23#define for_each_of_pci_range(parser, range) \
24 for (; of_pci_range_parser_one(parser, range);) 24 for (; of_pci_range_parser_one(parser, range);)
25 25
26static inline void of_pci_range_to_resource(struct of_pci_range *range,
27 struct device_node *np,
28 struct resource *res)
29{
30 res->flags = range->flags;
31 res->start = range->cpu_addr;
32 res->end = range->cpu_addr + range->size - 1;
33 res->parent = res->child = res->sibling = NULL;
34 res->name = np->full_name;
35}
36
37/* Translate a DMA address from device space to CPU space */ 26/* Translate a DMA address from device space to CPU space */
38extern u64 of_translate_dma_address(struct device_node *dev, 27extern u64 of_translate_dma_address(struct device_node *dev,
39 const __be32 *in_addr); 28 const __be32 *in_addr);
@@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
55extern const __be32 *of_get_address(struct device_node *dev, int index, 44extern const __be32 *of_get_address(struct device_node *dev, int index,
56 u64 *size, unsigned int *flags); 45 u64 *size, unsigned int *flags);
57 46
47extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
58extern unsigned long pci_address_to_pio(phys_addr_t addr); 48extern unsigned long pci_address_to_pio(phys_addr_t addr);
49extern phys_addr_t pci_pio_to_address(unsigned long pio);
59 50
60extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, 51extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
61 struct device_node *node); 52 struct device_node *node);
@@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
80 return NULL; 71 return NULL;
81} 72}
82 73
74static inline phys_addr_t pci_pio_to_address(unsigned long pio)
75{
76 return 0;
77}
78
83static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, 79static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
84 struct device_node *node) 80 struct device_node *node)
85{ 81{
@@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
138 u64 *size, unsigned int *flags); 134 u64 *size, unsigned int *flags);
139extern int of_pci_address_to_resource(struct device_node *dev, int bar, 135extern int of_pci_address_to_resource(struct device_node *dev, int bar,
140 struct resource *r); 136 struct resource *r);
137extern int of_pci_range_to_resource(struct of_pci_range *range,
138 struct device_node *np,
139 struct resource *res);
141#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ 140#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
142static inline int of_pci_address_to_resource(struct device_node *dev, int bar, 141static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
143 struct resource *r) 142 struct resource *r)
@@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
150{ 149{
151 return NULL; 150 return NULL;
152} 151}
152static inline int of_pci_range_to_resource(struct of_pci_range *range,
153 struct device_node *np,
154 struct resource *res)
155{
156 return -ENOSYS;
157}
153#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ 158#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
154 159
155#endif /* __OF_ADDRESS_H */ 160#endif /* __OF_ADDRESS_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index dde3a4a0fa5d..1fd207e7a847 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
15int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
16int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); 16int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
17int of_pci_parse_bus_range(struct device_node *node, struct resource *res); 17int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
18int of_get_pci_domain_nr(struct device_node *node);
18#else 19#else
19static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) 20static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
20{ 21{
@@ -43,6 +44,18 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res)
43{ 44{
44 return -EINVAL; 45 return -EINVAL;
45} 46}
47
48static inline int
49of_get_pci_domain_nr(struct device_node *node)
50{
51 return -1;
52}
53#endif
54
55#if defined(CONFIG_OF_ADDRESS)
56int of_pci_get_host_bridge_resources(struct device_node *dev,
57 unsigned char busno, unsigned char bus_max,
58 struct list_head *resources, resource_size_t *io_base);
46#endif 59#endif
47 60
48#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) 61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 96453f9bc8ba..5be8db45e368 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
45 * In the interest of not exposing interfaces to user-space unnecessarily, 45 * In the interest of not exposing interfaces to user-space unnecessarily,
46 * the following kernel-only defines are being added here. 46 * the following kernel-only defines are being added here.
47 */ 47 */
48#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) 48#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
49/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 49/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
50#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) 50#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
51 51
@@ -457,6 +457,9 @@ struct pci_bus {
457 unsigned char primary; /* number of primary bridge */ 457 unsigned char primary; /* number of primary bridge */
458 unsigned char max_bus_speed; /* enum pci_bus_speed */ 458 unsigned char max_bus_speed; /* enum pci_bus_speed */
459 unsigned char cur_bus_speed; /* enum pci_bus_speed */ 459 unsigned char cur_bus_speed; /* enum pci_bus_speed */
460#ifdef CONFIG_PCI_DOMAINS_GENERIC
461 int domain_nr;
462#endif
460 463
461 char name[48]; 464 char name[48];
462 465
@@ -1103,6 +1106,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1103 resource_size_t), 1106 resource_size_t),
1104 void *alignf_data); 1107 void *alignf_data);
1105 1108
1109
1110int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1111
1106static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1112static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1107{ 1113{
1108 struct pci_bus_region region; 1114 struct pci_bus_region region;
@@ -1288,12 +1294,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
1288 */ 1294 */
1289#ifdef CONFIG_PCI_DOMAINS 1295#ifdef CONFIG_PCI_DOMAINS
1290extern int pci_domains_supported; 1296extern int pci_domains_supported;
1297int pci_get_new_domain_nr(void);
1291#else 1298#else
1292enum { pci_domains_supported = 0 }; 1299enum { pci_domains_supported = 0 };
1293static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1300static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1294static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 1301static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1302static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1295#endif /* CONFIG_PCI_DOMAINS */ 1303#endif /* CONFIG_PCI_DOMAINS */
1296 1304
1305/*
1306 * Generic implementation for PCI domain support. If your
1307 * architecture does not need custom management of PCI
1308 * domains then this implementation will be used
1309 */
1310#ifdef CONFIG_PCI_DOMAINS_GENERIC
1311static inline int pci_domain_nr(struct pci_bus *bus)
1312{
1313 return bus->domain_nr;
1314}
1315void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
1316#else
1317static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
1318 struct device *parent)
1319{
1320}
1321#endif
1322
1297/* some architectures require additional setup to direct VGA traffic */ 1323/* some architectures require additional setup to direct VGA traffic */
1298typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1324typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1299 unsigned int command_bits, u32 flags); 1325 unsigned int command_bits, u32 flags);
@@ -1402,6 +1428,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1402 1428
1403static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1429static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1404static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1430static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1431static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1405 1432
1406#define dev_is_pci(d) (false) 1433#define dev_is_pci(d) (false)
1407#define dev_is_pf(d) (false) 1434#define dev_is_pf(d) (false)
@@ -1563,16 +1590,11 @@ enum pci_fixup_pass {
1563 1590
1564#ifdef CONFIG_PCI_QUIRKS 1591#ifdef CONFIG_PCI_QUIRKS
1565void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1592void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1566struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
1567int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); 1593int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
1568void pci_dev_specific_enable_acs(struct pci_dev *dev); 1594void pci_dev_specific_enable_acs(struct pci_dev *dev);
1569#else 1595#else
1570static inline void pci_fixup_device(enum pci_fixup_pass pass, 1596static inline void pci_fixup_device(enum pci_fixup_pass pass,
1571 struct pci_dev *dev) { } 1597 struct pci_dev *dev) { }
1572static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
1573{
1574 return pci_dev_get(dev);
1575}
1576static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, 1598static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
1577 u16 acs_flags) 1599 u16 acs_flags)
1578{ 1600{
@@ -1707,7 +1729,7 @@ bool pci_acs_path_enabled(struct pci_dev *start,
1707 struct pci_dev *end, u16 acs_flags); 1729 struct pci_dev *end, u16 acs_flags);
1708 1730
1709#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 1731#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
1710#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) 1732#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
1711 1733
1712/* Large Resource Data Type Tag Item Names */ 1734/* Large Resource Data Type Tag Item Names */
1713#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 1735#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
@@ -1834,15 +1856,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
1834 int (*fn)(struct pci_dev *pdev, 1856 int (*fn)(struct pci_dev *pdev,
1835 u16 alias, void *data), void *data); 1857 u16 alias, void *data), void *data);
1836 1858
1837/** 1859/* helper functions for operation of device flag */
1838 * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device 1860static inline void pci_set_dev_assigned(struct pci_dev *pdev)
1839 * @pdev: the PCI device 1861{
1840 * 1862 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
1841 * if the device is PCIE, return NULL 1863}
1842 * if the device isn't connected to a PCIe bridge (that is its parent is a 1864static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
1843 * legacy PCI bridge and the bridge is directly connected to bus 0), return its 1865{
1844 * parent 1866 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
1845 */ 1867}
1846struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); 1868static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
1847 1869{
1870 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
1871}
1848#endif /* LINUX_PCI_H */ 1872#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559af6b0..2706ee9a4327 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -187,6 +187,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
187 return -ENODEV; 187 return -ENODEV;
188} 188}
189#endif 189#endif
190
191void pci_configure_slot(struct pci_dev *dev);
192#endif 190#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6ed0bb73a864..da9e6f753196 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2245,6 +2245,8 @@
2245#define PCI_VENDOR_ID_MORETON 0x15aa 2245#define PCI_VENDOR_ID_MORETON 0x15aa
2246#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 2246#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
2247 2247
2248#define PCI_VENDOR_ID_VMWARE 0x15ad
2249
2248#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 2250#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
2249#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 2251#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
2250 2252
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 47da53c27ffa..79abb9c71772 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -8,6 +8,7 @@
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9#include <linux/edac.h> 9#include <linux/edac.h>
10#include <linux/ktime.h> 10#include <linux/ktime.h>
11#include <linux/pci.h>
11#include <linux/aer.h> 12#include <linux/aer.h>
12#include <linux/cper.h> 13#include <linux/cper.h>
13 14
@@ -173,25 +174,34 @@ TRACE_EVENT(mc_event,
173 * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED 174 * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED
174 */ 175 */
175 176
176#define aer_correctable_errors \ 177#define aer_correctable_errors \
177 {BIT(0), "Receiver Error"}, \ 178 {PCI_ERR_COR_RCVR, "Receiver Error"}, \
178 {BIT(6), "Bad TLP"}, \ 179 {PCI_ERR_COR_BAD_TLP, "Bad TLP"}, \
179 {BIT(7), "Bad DLLP"}, \ 180 {PCI_ERR_COR_BAD_DLLP, "Bad DLLP"}, \
180 {BIT(8), "RELAY_NUM Rollover"}, \ 181 {PCI_ERR_COR_REP_ROLL, "RELAY_NUM Rollover"}, \
181 {BIT(12), "Replay Timer Timeout"}, \ 182 {PCI_ERR_COR_REP_TIMER, "Replay Timer Timeout"}, \
182 {BIT(13), "Advisory Non-Fatal"} 183 {PCI_ERR_COR_ADV_NFAT, "Advisory Non-Fatal Error"}, \
183 184 {PCI_ERR_COR_INTERNAL, "Corrected Internal Error"}, \
184#define aer_uncorrectable_errors \ 185 {PCI_ERR_COR_LOG_OVER, "Header Log Overflow"}
185 {BIT(4), "Data Link Protocol"}, \ 186
186 {BIT(12), "Poisoned TLP"}, \ 187#define aer_uncorrectable_errors \
187 {BIT(13), "Flow Control Protocol"}, \ 188 {PCI_ERR_UNC_UND, "Undefined"}, \
188 {BIT(14), "Completion Timeout"}, \ 189 {PCI_ERR_UNC_DLP, "Data Link Protocol Error"}, \
189 {BIT(15), "Completer Abort"}, \ 190 {PCI_ERR_UNC_SURPDN, "Surprise Down Error"}, \
190 {BIT(16), "Unexpected Completion"}, \ 191 {PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"}, \
191 {BIT(17), "Receiver Overflow"}, \ 192 {PCI_ERR_UNC_FCP, "Flow Control Protocol Error"}, \
192 {BIT(18), "Malformed TLP"}, \ 193 {PCI_ERR_UNC_COMP_TIME, "Completion Timeout"}, \
193 {BIT(19), "ECRC"}, \ 194 {PCI_ERR_UNC_COMP_ABORT,"Completer Abort"}, \
194 {BIT(20), "Unsupported Request"} 195 {PCI_ERR_UNC_UNX_COMP, "Unexpected Completion"}, \
196 {PCI_ERR_UNC_RX_OVER, "Receiver Overflow"}, \
197 {PCI_ERR_UNC_MALF_TLP, "Malformed TLP"}, \
198 {PCI_ERR_UNC_ECRC, "ECRC Error"}, \
199 {PCI_ERR_UNC_UNSUP, "Unsupported Request Error"}, \
200 {PCI_ERR_UNC_ACSV, "ACS Violation"}, \
201 {PCI_ERR_UNC_INTN, "Uncorrectable Internal Error"},\
202 {PCI_ERR_UNC_MCBTLP, "MC Blocked TLP"}, \
203 {PCI_ERR_UNC_ATOMEG, "AtomicOp Egress Blocked"}, \
204 {PCI_ERR_UNC_TLPPRE, "TLP Prefix Blocked Error"}
195 205
196TRACE_EVENT(aer_event, 206TRACE_EVENT(aer_event,
197 TP_PROTO(const char *dev_name, 207 TP_PROTO(const char *dev_name,
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 30db069bce62..4a1d0cc38ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -552,6 +552,7 @@
552#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */ 552#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
553#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */ 553#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
554#define PCI_EXP_RTCAP 30 /* Root Capabilities */ 554#define PCI_EXP_RTCAP 30 /* Root Capabilities */
555#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
555#define PCI_EXP_RTSTA 32 /* Root Status */ 556#define PCI_EXP_RTSTA 32 /* Root Status */
556#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ 557#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
557#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ 558#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
@@ -630,7 +631,7 @@
630 631
631/* Advanced Error Reporting */ 632/* Advanced Error Reporting */
632#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ 633#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
633#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */ 634#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
634#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */ 635#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
635#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */ 636#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
636#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */ 637#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
diff --git a/kernel/resource.c b/kernel/resource.c
index 60c5a3856ab7..46322019ab7d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1245,6 +1245,76 @@ int release_mem_region_adjustable(struct resource *parent,
1245/* 1245/*
1246 * Managed region resource 1246 * Managed region resource
1247 */ 1247 */
1248static void devm_resource_release(struct device *dev, void *ptr)
1249{
1250 struct resource **r = ptr;
1251
1252 release_resource(*r);
1253}
1254
1255/**
1256 * devm_request_resource() - request and reserve an I/O or memory resource
1257 * @dev: device for which to request the resource
1258 * @root: root of the resource tree from which to request the resource
1259 * @new: descriptor of the resource to request
1260 *
1261 * This is a device-managed version of request_resource(). There is usually
1262 * no need to release resources requested by this function explicitly since
1263 * that will be taken care of when the device is unbound from its driver.
1264 * If for some reason the resource needs to be released explicitly, because
1265 * of ordering issues for example, drivers must call devm_release_resource()
1266 * rather than the regular release_resource().
1267 *
1268 * When a conflict is detected between any existing resources and the newly
1269 * requested resource, an error message will be printed.
1270 *
1271 * Returns 0 on success or a negative error code on failure.
1272 */
1273int devm_request_resource(struct device *dev, struct resource *root,
1274 struct resource *new)
1275{
1276 struct resource *conflict, **ptr;
1277
1278 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1279 if (!ptr)
1280 return -ENOMEM;
1281
1282 *ptr = new;
1283
1284 conflict = request_resource_conflict(root, new);
1285 if (conflict) {
1286 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1287 new, conflict->name, conflict);
1288 devres_free(ptr);
1289 return -EBUSY;
1290 }
1291
1292 devres_add(dev, ptr);
1293 return 0;
1294}
1295EXPORT_SYMBOL(devm_request_resource);
1296
1297static int devm_resource_match(struct device *dev, void *res, void *data)
1298{
1299 struct resource **ptr = res;
1300
1301 return *ptr == data;
1302}
1303
1304/**
1305 * devm_release_resource() - release a previously requested resource
1306 * @dev: device for which to release the resource
1307 * @new: descriptor of the resource to release
1308 *
1309 * Releases a resource previously requested using devm_request_resource().
1310 */
1311void devm_release_resource(struct device *dev, struct resource *new)
1312{
1313 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1314 new));
1315}
1316EXPORT_SYMBOL(devm_release_resource);
1317
1248struct region_devres { 1318struct region_devres {
1249 struct resource *parent; 1319 struct resource *parent;
1250 resource_size_t start; 1320 resource_size_t start;
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 5819a2708d7e..e05000e200d2 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -302,7 +302,7 @@ static void kvm_free_assigned_device(struct kvm *kvm,
302 else 302 else
303 pci_restore_state(assigned_dev->dev); 303 pci_restore_state(assigned_dev->dev);
304 304
305 assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 305 pci_clear_dev_assigned(assigned_dev->dev);
306 306
307 pci_release_regions(assigned_dev->dev); 307 pci_release_regions(assigned_dev->dev);
308 pci_disable_device(assigned_dev->dev); 308 pci_disable_device(assigned_dev->dev);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 714b94932312..e723bb91aa34 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -203,7 +203,7 @@ int kvm_assign_device(struct kvm *kvm,
203 goto out_unmap; 203 goto out_unmap;
204 } 204 }
205 205
206 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 206 pci_set_dev_assigned(pdev);
207 207
208 dev_info(&pdev->dev, "kvm assign device\n"); 208 dev_info(&pdev->dev, "kvm assign device\n");
209 209
@@ -229,7 +229,7 @@ int kvm_deassign_device(struct kvm *kvm,
229 229
230 iommu_detach_device(domain, &pdev->dev); 230 iommu_detach_device(domain, &pdev->dev);
231 231
232 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 232 pci_clear_dev_assigned(pdev);
233 233
234 dev_info(&pdev->dev, "kvm deassign device\n"); 234 dev_info(&pdev->dev, "kvm deassign device\n");
235 235