aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/PCI/pci-error-recovery.txt35
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt24
-rw-r--r--Documentation/devicetree/bindings/pci/mobiveil-pcie.txt73
-rw-r--r--Documentation/devicetree/bindings/pci/pci-armada8k.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt62
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt (renamed from Documentation/devicetree/bindings/pci/rockchip-pcie.txt)0
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--MAINTAINERS17
-rw-r--r--arch/arm64/configs/defconfig3
-rw-r--r--arch/microblaze/include/asm/pci.h4
-rw-r--r--arch/microblaze/pci/pci-common.c61
-rw-r--r--arch/mips/pci/pci-legacy.c8
-rw-r--r--arch/sparc/kernel/leon_pci.c62
-rw-r--r--arch/sparc/kernel/pci.c136
-rw-r--r--arch/sparc/kernel/pci_common.c31
-rw-r--r--arch/sparc/kernel/pci_msi.c10
-rw-r--r--arch/sparc/kernel/pcic.c94
-rw-r--r--arch/x86/pci/early.c19
-rw-r--r--arch/x86/pci/fixup.c4
-rw-r--r--arch/xtensa/include/asm/pci.h2
-rw-r--r--arch/xtensa/kernel/pci.c69
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c24
-rw-r--r--drivers/iommu/amd_iommu.c11
-rw-r--r--drivers/iommu/intel-iommu.c3
-rw-r--r--drivers/misc/pci_endpoint_test.c29
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c75
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c47
-rw-r--r--drivers/nvme/host/pci.c20
-rw-r--r--drivers/pci/Kconfig12
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/ats.c3
-rw-r--r--drivers/pci/dwc/Kconfig88
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c19
-rw-r--r--drivers/pci/dwc/pci-imx6.c2
-rw-r--r--drivers/pci/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c21
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c6
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c19
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c80
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c155
-rw-r--r--drivers/pci/dwc/pcie-designware.c22
-rw-r--r--drivers/pci/dwc/pcie-designware.h1
-rw-r--r--drivers/pci/dwc/pcie-qcom.c13
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c35
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c23
-rw-r--r--drivers/pci/host/Kconfig55
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-aardvark.c7
-rw-r--r--drivers/pci/host/pci-ftpci100.c6
-rw-r--r--drivers/pci/host/pci-host-common.c13
-rw-r--r--drivers/pci/host/pci-host-generic.c1
-rw-r--r--drivers/pci/host/pci-hyperv.c162
-rw-r--r--drivers/pci/host/pci-mvebu.c2
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c2
-rw-r--r--drivers/pci/host/pci-v3-semi.c5
-rw-r--r--drivers/pci/host/pci-versatile.c5
-rw-r--r--drivers/pci/host/pci-xgene.c5
-rw-r--r--drivers/pci/host/pcie-altera.c7
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c5
-rw-r--r--drivers/pci/host/pcie-mediatek.c236
-rw-r--r--drivers/pci/host/pcie-mobiveil.c866
-rw-r--r--drivers/pci/host/pcie-rcar.c284
-rw-r--r--drivers/pci/host/pcie-rockchip-ep.c642
-rw-r--r--drivers/pci/host/pcie-rockchip-host.c1142
-rw-r--r--drivers/pci/host/pcie-rockchip.c1580
-rw-r--r--drivers/pci/host/pcie-rockchip.h338
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c6
-rw-r--r--drivers/pci/host/pcie-xilinx.c6
-rw-r--r--drivers/pci/host/vmd.c91
-rw-r--r--drivers/pci/hotplug/Kconfig5
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c45
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c84
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c84
-rw-r--r--drivers/pci/hotplug/pnv_php.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h12
-rw-r--r--drivers/pci/hotplug/shpchp_core.c14
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c8
-rw-r--r--drivers/pci/iov.c42
-rw-r--r--drivers/pci/of.c63
-rw-r--r--drivers/pci/pci-acpi.c55
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-pf-stub.c54
-rw-r--r--drivers/pci/pci-sysfs.c15
-rw-r--r--drivers/pci/pci.c89
-rw-r--r--drivers/pci/pci.h45
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c11
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h32
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c397
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c38
-rw-r--r--drivers/pci/pcie/aspm.c9
-rw-r--r--drivers/pci/pcie/dpc.c74
-rw-r--r--drivers/pci/pcie/err.c388
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c57
-rw-r--r--drivers/pci/pcie/portdrv_core.c71
-rw-r--r--drivers/pci/probe.c96
-rw-r--r--drivers/pci/quirks.c1002
-rw-r--r--drivers/pci/setup-bus.c82
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/aer.h1
-rw-r--r--include/linux/of_pci.h34
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-epc.h8
-rw-r--r--include/linux/pci-epf.h4
-rw-r--r--include/linux/pci.h21
-rw-r--r--include/linux/pci_hotplug.h18
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/ras/ras_event.h22
-rw-r--r--include/uapi/linux/pci_regs.h6
120 files changed, 6172 insertions, 3846 deletions
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index 0b6bb3ef449e..688b69121e82 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -110,7 +110,7 @@ The actual steps taken by a platform to recover from a PCI error
110event will be platform-dependent, but will follow the general 110event will be platform-dependent, but will follow the general
111sequence described below. 111sequence described below.
112 112
113STEP 0: Error Event 113STEP 0: Error Event: ERR_NONFATAL
114------------------- 114-------------------
115A PCI bus error is detected by the PCI hardware. On powerpc, the slot 115A PCI bus error is detected by the PCI hardware. On powerpc, the slot
116is isolated, in that all I/O is blocked: all reads return 0xffffffff, 116is isolated, in that all I/O is blocked: all reads return 0xffffffff,
@@ -228,13 +228,7 @@ proceeds to either STEP3 (Link Reset) or to STEP 5 (Resume Operations).
228If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform 228If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
229proceeds to STEP 4 (Slot Reset) 229proceeds to STEP 4 (Slot Reset)
230 230
231STEP 3: Link Reset 231STEP 3: Slot Reset
232------------------
233The platform resets the link. This is a PCI-Express specific step
234and is done whenever a fatal error has been detected that can be
235"solved" by resetting the link.
236
237STEP 4: Slot Reset
238------------------ 232------------------
239 233
240In response to a return value of PCI_ERS_RESULT_NEED_RESET, the 234In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
@@ -320,7 +314,7 @@ Failure).
320>>> However, it probably should. 314>>> However, it probably should.
321 315
322 316
323STEP 5: Resume Operations 317STEP 4: Resume Operations
324------------------------- 318-------------------------
325The platform will call the resume() callback on all affected device 319The platform will call the resume() callback on all affected device
326drivers if all drivers on the segment have returned 320drivers if all drivers on the segment have returned
@@ -332,7 +326,7 @@ a result code.
332At this point, if a new error happens, the platform will restart 326At this point, if a new error happens, the platform will restart
333a new error recovery sequence. 327a new error recovery sequence.
334 328
335STEP 6: Permanent Failure 329STEP 5: Permanent Failure
336------------------------- 330-------------------------
337A "permanent failure" has occurred, and the platform cannot recover 331A "permanent failure" has occurred, and the platform cannot recover
338the device. The platform will call error_detected() with a 332the device. The platform will call error_detected() with a
@@ -355,6 +349,27 @@ errors. See the discussion in powerpc/eeh-pci-error-recovery.txt
355for additional detail on real-life experience of the causes of 349for additional detail on real-life experience of the causes of
356software errors. 350software errors.
357 351
352STEP 0: Error Event: ERR_FATAL
353-------------------
354PCI bus error is detected by the PCI hardware. On powerpc, the slot is
355isolated, in that all I/O is blocked: all reads return 0xffffffff, all
356writes are ignored.
357
358STEP 1: Remove devices
359--------------------
360Platform removes the devices depending on the error agent, it could be
361this port for all subordinates or upstream component (likely downstream
362port)
363
364STEP 2: Reset link
365--------------------
366The platform resets the link. This is a PCI-Express specific step and is
367done whenever a fatal error has been detected that can be "solved" by
368resetting the link.
369
370STEP 3: Re-enumerate the devices
371--------------------
372Initiates the re-enumeration.
358 373
359Conclusion; General Remarks 374Conclusion; General Remarks
360--------------------------- 375---------------------------
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1beb30d8d7fc..20cc45602f45 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3162,6 +3162,8 @@
3162 on: Turn realloc on 3162 on: Turn realloc on
3163 realloc same as realloc=on 3163 realloc same as realloc=on
3164 noari do not use PCIe ARI. 3164 noari do not use PCIe ARI.
3165 noats [PCIE, Intel-IOMMU, AMD-IOMMU]
3166 do not use PCIe ATS (and IOMMU device IOTLB).
3165 pcie_scan_all Scan all possible PCIe devices. Otherwise we 3167 pcie_scan_all Scan all possible PCIe devices. Otherwise we
3166 only look for one device below a PCIe downstream 3168 only look for one device below a PCIe downstream
3167 port. 3169 port.
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 1da7ade3183c..c124f9bc11f3 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,7 +1,9 @@
1* Synopsys DesignWare PCIe interface 1* Synopsys DesignWare PCIe interface
2 2
3Required properties: 3Required properties:
4- compatible: should contain "snps,dw-pcie" to identify the core. 4- compatible:
5 "snps,dw-pcie" for RC mode;
6 "snps,dw-pcie-ep" for EP mode;
5- reg: Should contain the configuration address space. 7- reg: Should contain the configuration address space.
6- reg-names: Must be "config" for the PCIe configuration space. 8- reg-names: Must be "config" for the PCIe configuration space.
7 (The old way of getting the configuration address space from "ranges" 9 (The old way of getting the configuration address space from "ranges"
@@ -41,11 +43,11 @@ EP mode:
41 43
42Example configuration: 44Example configuration:
43 45
44 pcie: pcie@dffff000 { 46 pcie: pcie@dfc00000 {
45 compatible = "snps,dw-pcie"; 47 compatible = "snps,dw-pcie";
46 reg = <0xdffff000 0x1000>, /* Controller registers */ 48 reg = <0xdfc00000 0x0001000>, /* IP registers */
47 <0xd0000000 0x2000>; /* PCI config space */ 49 <0xd0000000 0x0002000>; /* Configuration space */
48 reg-names = "ctrlreg", "config"; 50 reg-names = "dbi", "config";
49 #address-cells = <3>; 51 #address-cells = <3>;
50 #size-cells = <2>; 52 #size-cells = <2>;
51 device_type = "pci"; 53 device_type = "pci";
@@ -54,5 +56,15 @@ Example configuration:
54 interrupts = <25>, <24>; 56 interrupts = <25>, <24>;
55 #interrupt-cells = <1>; 57 #interrupt-cells = <1>;
56 num-lanes = <1>; 58 num-lanes = <1>;
57 num-viewport = <3>; 59 };
60or
61 pcie: pcie@dfc00000 {
62 compatible = "snps,dw-pcie-ep";
63 reg = <0xdfc00000 0x0001000>, /* IP registers 1 */
64 <0xdfc01000 0x0001000>, /* IP registers 2 */
65 <0xd0000000 0x2000000>; /* Configuration space */
66 reg-names = "dbi", "dbi2", "addr_space";
67 num-ib-windows = <6>;
68 num-ob-windows = <2>;
69 num-lanes = <1>;
58 }; 70 };
diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
new file mode 100644
index 000000000000..65038aa642e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -0,0 +1,73 @@
1* Mobiveil AXI PCIe Root Port Bridge DT description
2
3Mobiveil's GPEX 4.0 is a PCIe Gen4 root port bridge IP. This configurable IP
4has up to 8 outbound and inbound windows for the address translation.
5
6Required properties:
7- #address-cells: Address representation for root ports, set to <3>
8- #size-cells: Size representation for root ports, set to <2>
9- #interrupt-cells: specifies the number of cells needed to encode an
10 interrupt source. The value must be 1.
11- compatible: Should contain "mbvl,gpex40-pcie"
12- reg: Should contain PCIe registers location and length
13 "config_axi_slave": PCIe controller registers
14 "csr_axi_slave" : Bridge config registers
15 "gpio_slave" : GPIO registers to control slot power
16 "apb_csr" : MSI registers
17
18- device_type: must be "pci"
19- apio-wins : number of requested apio outbound windows
20 default 2 outbound windows are configured -
21 1. Config window
22 2. Memory window
23- ppio-wins : number of requested ppio inbound windows
24 default 1 inbound memory window is configured.
25- bus-range: PCI bus numbers covered
26- interrupt-controller: identifies the node as an interrupt controller
27- #interrupt-cells: specifies the number of cells needed to encode an
28 interrupt source. The value must be 1.
29- interrupt-parent : phandle to the interrupt controller that
30 it is attached to, it should be set to gic to point to
31 ARM's Generic Interrupt Controller node in system DT.
32- interrupts: The interrupt line of the PCIe controller
33 last cell of this field is set to 4 to
34 denote it as IRQ_TYPE_LEVEL_HIGH type interrupt.
35- interrupt-map-mask,
36 interrupt-map: standard PCI properties to define the mapping of the
37 PCI interface to interrupt numbers.
38- ranges: ranges for the PCI memory regions (I/O space region is not
39 supported by hardware)
40 Please refer to the standard PCI bus binding document for a more
41 detailed explanation
42
43
44Example:
45++++++++
46 pcie0: pcie@a0000000 {
47 #address-cells = <3>;
48 #size-cells = <2>;
49 compatible = "mbvl,gpex40-pcie";
50 reg = <0xa0000000 0x00001000>,
51 <0xb0000000 0x00010000>,
52 <0xff000000 0x00200000>,
53 <0xb0010000 0x00001000>;
54 reg-names = "config_axi_slave",
55 "csr_axi_slave",
56 "gpio_slave",
57 "apb_csr";
58 device_type = "pci";
59 apio-wins = <2>;
60 ppio-wins = <1>;
61 bus-range = <0x00000000 0x000000ff>;
62 interrupt-controller;
63 interrupt-parent = <&gic>;
64 #interrupt-cells = <1>;
65 interrupts = < 0 89 4 >;
66 interrupt-map-mask = <0 0 0 7>;
67 interrupt-map = <0 0 0 0 &pci_express 0>,
68 <0 0 0 1 &pci_express 1>,
69 <0 0 0 2 &pci_express 2>,
70 <0 0 0 3 &pci_express 3>;
71 ranges = < 0x83000000 0 0x00000000 0xa8000000 0 0x8000000>;
72
73 };
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
index c1e4c3d10a74..9e3fc15e1af8 100644
--- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt
+++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
@@ -12,7 +12,10 @@ Required properties:
12 - "ctrl" for the control register region 12 - "ctrl" for the control register region
13 - "config" for the config space region 13 - "config" for the config space region
14- interrupts: Interrupt specifier for the PCIe controler 14- interrupts: Interrupt specifier for the PCIe controler
15- clocks: reference to the PCIe controller clock 15- clocks: reference to the PCIe controller clocks
16- clock-names: mandatory if there is a second clock, in this case the
17 name must be "core" for the first clock and "reg" for the second
18 one
16 19
17Example: 20Example:
18 21
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 1fb614e615da..a5f7fc62d10e 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -8,6 +8,7 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
8 "renesas,pcie-r8a7793" for the R8A7793 SoC; 8 "renesas,pcie-r8a7793" for the R8A7793 SoC;
9 "renesas,pcie-r8a7795" for the R8A7795 SoC; 9 "renesas,pcie-r8a7795" for the R8A7795 SoC;
10 "renesas,pcie-r8a7796" for the R8A7796 SoC; 10 "renesas,pcie-r8a7796" for the R8A7796 SoC;
11 "renesas,pcie-r8a77980" for the R8A77980 SoC;
11 "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or 12 "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
12 RZ/G1 compatible device. 13 RZ/G1 compatible device.
13 "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device. 14 "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
@@ -32,6 +33,11 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
32 and PCIe bus clocks. 33 and PCIe bus clocks.
33- clock-names: from common clock binding: should be "pcie" and "pcie_bus". 34- clock-names: from common clock binding: should be "pcie" and "pcie_bus".
34 35
36Optional properties:
37- phys: from common PHY binding: PHY phandle and specifier (only make sense
38 for R-Car gen3 SoCs where the PCIe PHYs have their own register blocks).
39- phy-names: from common PHY binding: should be "pcie".
40
35Example: 41Example:
36 42
37SoC-specific DT Entry: 43SoC-specific DT Entry:
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt
new file mode 100644
index 000000000000..778467307a93
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt
@@ -0,0 +1,62 @@
1* Rockchip AXI PCIe Endpoint Controller DT description
2
3Required properties:
4- compatible: Should contain "rockchip,rk3399-pcie-ep"
5- reg: Two register ranges as listed in the reg-names property
6- reg-names: Must include the following names
7 - "apb-base"
8 - "mem-base"
9- clocks: Must contain an entry for each entry in clock-names.
10 See ../clocks/clock-bindings.txt for details.
11- clock-names: Must include the following entries:
12 - "aclk"
13 - "aclk-perf"
14 - "hclk"
15 - "pm"
16- resets: Must contain seven entries for each entry in reset-names.
17 See ../reset/reset.txt for details.
18- reset-names: Must include the following names
19 - "core"
20 - "mgmt"
21 - "mgmt-sticky"
22 - "pipe"
23 - "pm"
24 - "aclk"
25 - "pclk"
26- pinctrl-names : The pin control state names
27- pinctrl-0: The "default" pinctrl state
28- phys: Must contain an phandle to a PHY for each entry in phy-names.
29- phy-names: Must include 4 entries for all 4 lanes even if some of
30 them won't be used for your cases. Entries are of the form "pcie-phy-N":
31 where N ranges from 0 to 3.
32 (see example below and you MUST also refer to ../phy/rockchip-pcie-phy.txt
33 for changing the #phy-cells of phy node to support it)
34- rockchip,max-outbound-regions: Maximum number of outbound regions
35
36Optional Property:
37- num-lanes: number of lanes to use
38- max-functions: Maximum number of functions that can be configured (default 1).
39
40pcie0-ep: pcie@f8000000 {
41 compatible = "rockchip,rk3399-pcie-ep";
42 #address-cells = <3>;
43 #size-cells = <2>;
44 rockchip,max-outbound-regions = <16>;
45 clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
46 <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
47 clock-names = "aclk", "aclk-perf",
48 "hclk", "pm";
49 max-functions = /bits/ 8 <8>;
50 num-lanes = <4>;
51 reg = <0x0 0xfd000000 0x0 0x1000000>, <0x0 0x80000000 0x0 0x20000>;
52 reg-names = "apb-base", "mem-base";
53 resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
54 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
55 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
56 reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
57 "pm", "pclk", "aclk";
58 phys = <&pcie_phy 0>, <&pcie_phy 1>, <&pcie_phy 2>, <&pcie_phy 3>;
59 phy-names = "pcie-phy-0", "pcie-phy-1", "pcie-phy-2", "pcie-phy-3";
60 pinctrl-names = "default";
61 pinctrl-0 = <&pcie_clkreq>;
62};
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt
index af34c65773fd..af34c65773fd 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 36003832c2a8..74e9db9e9088 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -205,6 +205,7 @@ lwn Liebherr-Werk Nenzing GmbH
205macnica Macnica Americas 205macnica Macnica Americas
206marvell Marvell Technology Group Ltd. 206marvell Marvell Technology Group Ltd.
207maxim Maxim Integrated Products 207maxim Maxim Integrated Products
208mbvl Mobiveil Inc.
208mcube mCube 209mcube mCube
209meas Measurement Specialties 210meas Measurement Specialties
210mediatek MediaTek Inc. 211mediatek MediaTek Inc.
diff --git a/MAINTAINERS b/MAINTAINERS
index dc241b04d1bd..b4a564213cdf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9484,6 +9484,13 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
9484S: Maintained 9484S: Maintained
9485F: drivers/media/dvb-frontends/mn88473* 9485F: drivers/media/dvb-frontends/mn88473*
9486 9486
9487PCI DRIVER FOR MOBIVEIL PCIE IP
9488M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
9489L: linux-pci@vger.kernel.org
9490S: Supported
9491F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
9492F: drivers/pci/host/pcie-mobiveil.c
9493
9487MODULE SUPPORT 9494MODULE SUPPORT
9488M: Jessica Yu <jeyu@kernel.org> 9495M: Jessica Yu <jeyu@kernel.org>
9489T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next 9496T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10826,9 +10833,9 @@ F: Documentation/devicetree/bindings/pci/cdns,*.txt
10826F: drivers/pci/cadence/pcie-cadence* 10833F: drivers/pci/cadence/pcie-cadence*
10827 10834
10828PCI DRIVER FOR FREESCALE LAYERSCAPE 10835PCI DRIVER FOR FREESCALE LAYERSCAPE
10829M: Minghuan Lian <minghuan.Lian@freescale.com> 10836M: Minghuan Lian <minghuan.Lian@nxp.com>
10830M: Mingkai Hu <mingkai.hu@freescale.com> 10837M: Mingkai Hu <mingkai.hu@nxp.com>
10831M: Roy Zang <tie-fei.zang@freescale.com> 10838M: Roy Zang <roy.zang@nxp.com>
10832L: linuxppc-dev@lists.ozlabs.org 10839L: linuxppc-dev@lists.ozlabs.org
10833L: linux-pci@vger.kernel.org 10840L: linux-pci@vger.kernel.org
10834L: linux-arm-kernel@lists.infradead.org 10841L: linux-arm-kernel@lists.infradead.org
@@ -11054,8 +11061,8 @@ M: Shawn Lin <shawn.lin@rock-chips.com>
11054L: linux-pci@vger.kernel.org 11061L: linux-pci@vger.kernel.org
11055L: linux-rockchip@lists.infradead.org 11062L: linux-rockchip@lists.infradead.org
11056S: Maintained 11063S: Maintained
11057F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt 11064F: Documentation/devicetree/bindings/pci/rockchip-pcie*
11058F: drivers/pci/host/pcie-rockchip.c 11065F: drivers/pci/host/pcie-rockchip*
11059 11066
11060PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC 11067PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
11061M: Linus Walleij <linus.walleij@linaro.org> 11068M: Linus Walleij <linus.walleij@linaro.org>
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ecf613761e78..17ea72b1b389 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -78,7 +78,8 @@ CONFIG_PCIE_ARMADA_8K=y
78CONFIG_PCI_AARDVARK=y 78CONFIG_PCI_AARDVARK=y
79CONFIG_PCI_TEGRA=y 79CONFIG_PCI_TEGRA=y
80CONFIG_PCIE_RCAR=y 80CONFIG_PCIE_RCAR=y
81CONFIG_PCIE_ROCKCHIP=m 81CONFIG_PCIE_ROCKCHIP=y
82CONFIG_PCIE_ROCKCHIP_HOST=m
82CONFIG_PCI_HOST_GENERIC=y 83CONFIG_PCI_HOST_GENERIC=y
83CONFIG_PCI_XGENE=y 84CONFIG_PCI_XGENE=y
84CONFIG_PCI_HOST_THUNDER_PEM=y 85CONFIG_PCI_HOST_THUNDER_PEM=y
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 66cf3a5a2f83..859c19828dd4 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -61,10 +61,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
61 61
62#define HAVE_PCI_LEGACY 1 62#define HAVE_PCI_LEGACY 1
63 63
64extern void pcibios_claim_one_bus(struct pci_bus *b);
65
66extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
67
68extern void pcibios_resource_survey(void); 64extern void pcibios_resource_survey(void);
69 65
70struct file; 66struct file;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 161f9758c631..f34346d56095 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -915,67 +915,6 @@ void __init pcibios_resource_survey(void)
915 pci_assign_unassigned_resources(); 915 pci_assign_unassigned_resources();
916} 916}
917 917
918/* This is used by the PCI hotplug driver to allocate resource
919 * of newly plugged busses. We can try to consolidate with the
920 * rest of the code later, for now, keep it as-is as our main
921 * resource allocation function doesn't deal with sub-trees yet.
922 */
923void pcibios_claim_one_bus(struct pci_bus *bus)
924{
925 struct pci_dev *dev;
926 struct pci_bus *child_bus;
927
928 list_for_each_entry(dev, &bus->devices, bus_list) {
929 int i;
930
931 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
932 struct resource *r = &dev->resource[i];
933
934 if (r->parent || !r->start || !r->flags)
935 continue;
936
937 pr_debug("PCI: Claiming %s: ", pci_name(dev));
938 pr_debug("Resource %d: %016llx..%016llx [%x]\n",
939 i, (unsigned long long)r->start,
940 (unsigned long long)r->end,
941 (unsigned int)r->flags);
942
943 if (pci_claim_resource(dev, i) == 0)
944 continue;
945
946 pci_claim_bridge_resource(dev, i);
947 }
948 }
949
950 list_for_each_entry(child_bus, &bus->children, node)
951 pcibios_claim_one_bus(child_bus);
952}
953EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
954
955
956/* pcibios_finish_adding_to_bus
957 *
958 * This is to be called by the hotplug code after devices have been
959 * added to a bus, this include calling it for a PHB that is just
960 * being added
961 */
962void pcibios_finish_adding_to_bus(struct pci_bus *bus)
963{
964 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
965 pci_domain_nr(bus), bus->number);
966
967 /* Allocate bus and devices resources */
968 pcibios_allocate_bus_resources(bus);
969 pcibios_claim_one_bus(bus);
970
971 /* Add new devices to global lists. Register in proc, sysfs. */
972 pci_bus_add_devices(bus);
973
974 /* Fixup EEH */
975 /* eeh_add_device_tree_late(bus); */
976}
977EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
978
979static void pcibios_setup_phb_resources(struct pci_controller *hose, 918static void pcibios_setup_phb_resources(struct pci_controller *hose,
980 struct list_head *resources) 919 struct list_head *resources)
981{ 920{
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 0c65c38e05d6..f1e92bf743c2 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -263,9 +263,8 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
263 (!(r->flags & IORESOURCE_ROM_ENABLE))) 263 (!(r->flags & IORESOURCE_ROM_ENABLE)))
264 continue; 264 continue;
265 if (!r->start && r->end) { 265 if (!r->start && r->end) {
266 printk(KERN_ERR "PCI: Device %s not available " 266 pci_err(dev,
267 "because of resource collisions\n", 267 "can't enable device: resource collisions\n");
268 pci_name(dev));
269 return -EINVAL; 268 return -EINVAL;
270 } 269 }
271 if (r->flags & IORESOURCE_IO) 270 if (r->flags & IORESOURCE_IO)
@@ -274,8 +273,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
274 cmd |= PCI_COMMAND_MEMORY; 273 cmd |= PCI_COMMAND_MEMORY;
275 } 274 }
276 if (cmd != old_cmd) { 275 if (cmd != old_cmd) {
277 printk("PCI: Enabling device %s (%04x -> %04x)\n", 276 pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
278 pci_name(dev), old_cmd, cmd);
279 pci_write_config_word(dev, PCI_COMMAND, cmd); 277 pci_write_config_word(dev, PCI_COMMAND, cmd);
280 } 278 }
281 return 0; 279 return 0;
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 15b59169c535..e5e5ff6b9a5c 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -60,50 +60,30 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
60 pci_bus_add_devices(root_bus); 60 pci_bus_add_devices(root_bus);
61} 61}
62 62
63void pcibios_fixup_bus(struct pci_bus *pbus) 63int pcibios_enable_device(struct pci_dev *dev, int mask)
64{ 64{
65 struct pci_dev *dev; 65 u16 cmd, oldcmd;
66 int i, has_io, has_mem; 66 int i;
67 u16 cmd;
68 67
69 list_for_each_entry(dev, &pbus->devices, bus_list) { 68 pci_read_config_word(dev, PCI_COMMAND, &cmd);
70 /* 69 oldcmd = cmd;
71 * We can not rely on that the bootloader has enabled I/O 70
72 * or memory access to PCI devices. Instead we enable it here 71 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
73 * if the device has BARs of respective type. 72 struct resource *res = &dev->resource[i];
74 */ 73
75 has_io = has_mem = 0; 74 /* Only set up the requested stuff */
76 for (i = 0; i < PCI_ROM_RESOURCE; i++) { 75 if (!(mask & (1<<i)))
77 unsigned long f = dev->resource[i].flags; 76 continue;
78 if (f & IORESOURCE_IO) 77
79 has_io = 1; 78 if (res->flags & IORESOURCE_IO)
80 else if (f & IORESOURCE_MEM)
81 has_mem = 1;
82 }
83 /* ROM BARs are mapped into 32-bit memory space */
84 if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
85 dev->resource[PCI_ROM_RESOURCE].flags |=
86 IORESOURCE_ROM_ENABLE;
87 has_mem = 1;
88 }
89 pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
90 if (has_io && !(cmd & PCI_COMMAND_IO)) {
91#ifdef CONFIG_PCI_DEBUG
92 printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
93 pci_name(dev));
94#endif
95 cmd |= PCI_COMMAND_IO; 79 cmd |= PCI_COMMAND_IO;
96 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, 80 if (res->flags & IORESOURCE_MEM)
97 cmd);
98 }
99 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
100#ifdef CONFIG_PCI_DEBUG
101 printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
102 "%s\n", pci_name(dev));
103#endif
104 cmd |= PCI_COMMAND_MEMORY; 81 cmd |= PCI_COMMAND_MEMORY;
105 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
106 cmd);
107 }
108 } 82 }
83
84 if (cmd != oldcmd) {
85 pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
86 pci_write_config_word(dev, PCI_COMMAND, cmd);
87 }
88 return 0;
109} 89}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 41b20edb427d..17ea16a1337c 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -214,8 +214,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
214 if (!addrs) 214 if (!addrs)
215 return; 215 return;
216 if (ofpci_verbose) 216 if (ofpci_verbose)
217 printk(" parse addresses (%d bytes) @ %p\n", 217 pci_info(dev, " parse addresses (%d bytes) @ %p\n",
218 proplen, addrs); 218 proplen, addrs);
219 op_res = &op->resource[0]; 219 op_res = &op->resource[0];
220 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { 220 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
221 struct resource *res; 221 struct resource *res;
@@ -227,8 +227,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
227 continue; 227 continue;
228 i = addrs[0] & 0xff; 228 i = addrs[0] & 0xff;
229 if (ofpci_verbose) 229 if (ofpci_verbose)
230 printk(" start: %llx, end: %llx, i: %x\n", 230 pci_info(dev, " start: %llx, end: %llx, i: %x\n",
231 op_res->start, op_res->end, i); 231 op_res->start, op_res->end, i);
232 232
233 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 233 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
234 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 234 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -236,13 +236,15 @@ static void pci_parse_of_addrs(struct platform_device *op,
236 res = &dev->resource[PCI_ROM_RESOURCE]; 236 res = &dev->resource[PCI_ROM_RESOURCE];
237 flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN; 237 flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
238 } else { 238 } else {
239 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); 239 pci_err(dev, "bad cfg reg num 0x%x\n", i);
240 continue; 240 continue;
241 } 241 }
242 res->start = op_res->start; 242 res->start = op_res->start;
243 res->end = op_res->end; 243 res->end = op_res->end;
244 res->flags = flags; 244 res->flags = flags;
245 res->name = pci_name(dev); 245 res->name = pci_name(dev);
246
247 pci_info(dev, "reg 0x%x: %pR\n", i, res);
246 } 248 }
247} 249}
248 250
@@ -289,8 +291,8 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
289 type = ""; 291 type = "";
290 292
291 if (ofpci_verbose) 293 if (ofpci_verbose)
292 printk(" create device, devfn: %x, type: %s\n", 294 pci_info(bus," create device, devfn: %x, type: %s\n",
293 devfn, type); 295 devfn, type);
294 296
295 dev->sysdata = node; 297 dev->sysdata = node;
296 dev->dev.parent = bus->bridge; 298 dev->dev.parent = bus->bridge;
@@ -323,10 +325,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
323 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus), 325 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
324 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 326 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
325 327
326 if (ofpci_verbose)
327 printk(" class: 0x%x device name: %s\n",
328 dev->class, pci_name(dev));
329
330 /* I have seen IDE devices which will not respond to 328 /* I have seen IDE devices which will not respond to
331 * the bmdma simplex check reads if bus mastering is 329 * the bmdma simplex check reads if bus mastering is
332 * disabled. 330 * disabled.
@@ -353,10 +351,13 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
353 dev->irq = PCI_IRQ_NONE; 351 dev->irq = PCI_IRQ_NONE;
354 } 352 }
355 353
354 pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
355 dev->vendor, dev->device, dev->hdr_type, dev->class);
356
356 pci_parse_of_addrs(sd->op, node, dev); 357 pci_parse_of_addrs(sd->op, node, dev);
357 358
358 if (ofpci_verbose) 359 if (ofpci_verbose)
359 printk(" adding to system ...\n"); 360 pci_info(dev, " adding to system ...\n");
360 361
361 pci_device_add(dev, bus); 362 pci_device_add(dev, bus);
362 363
@@ -430,19 +431,19 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
430 u64 size; 431 u64 size;
431 432
432 if (ofpci_verbose) 433 if (ofpci_verbose)
433 printk("of_scan_pci_bridge(%s)\n", node->full_name); 434 pci_info(dev, "of_scan_pci_bridge(%s)\n", node->full_name);
434 435
435 /* parse bus-range property */ 436 /* parse bus-range property */
436 busrange = of_get_property(node, "bus-range", &len); 437 busrange = of_get_property(node, "bus-range", &len);
437 if (busrange == NULL || len != 8) { 438 if (busrange == NULL || len != 8) {
438 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", 439 pci_info(dev, "Can't get bus-range for PCI-PCI bridge %s\n",
439 node->full_name); 440 node->full_name);
440 return; 441 return;
441 } 442 }
442 443
443 if (ofpci_verbose) 444 if (ofpci_verbose)
444 printk(" Bridge bus range [%u --> %u]\n", 445 pci_info(dev, " Bridge bus range [%u --> %u]\n",
445 busrange[0], busrange[1]); 446 busrange[0], busrange[1]);
446 447
447 ranges = of_get_property(node, "ranges", &len); 448 ranges = of_get_property(node, "ranges", &len);
448 simba = 0; 449 simba = 0;
@@ -454,8 +455,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
454 455
455 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 456 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
456 if (!bus) { 457 if (!bus) {
457 printk(KERN_ERR "Failed to create pci bus for %s\n", 458 pci_err(dev, "Failed to create pci bus for %s\n",
458 node->full_name); 459 node->full_name);
459 return; 460 return;
460 } 461 }
461 462
@@ -464,8 +465,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
464 bus->bridge_ctl = 0; 465 bus->bridge_ctl = 0;
465 466
466 if (ofpci_verbose) 467 if (ofpci_verbose)
467 printk(" Bridge ranges[%p] simba[%d]\n", 468 pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
468 ranges, simba); 469 ranges, simba);
469 470
470 /* parse ranges property, or cook one up by hand for Simba */ 471 /* parse ranges property, or cook one up by hand for Simba */
471 /* PCI #address-cells == 3 and #size-cells == 2 always */ 472 /* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -487,10 +488,10 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
487 u64 start; 488 u64 start;
488 489
489 if (ofpci_verbose) 490 if (ofpci_verbose)
490 printk(" RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:" 491 pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
491 "%08x:%08x]\n", 492 "%08x:%08x]\n",
492 ranges[0], ranges[1], ranges[2], ranges[3], 493 ranges[0], ranges[1], ranges[2], ranges[3],
493 ranges[4], ranges[5], ranges[6], ranges[7]); 494 ranges[4], ranges[5], ranges[6], ranges[7]);
494 495
495 flags = pci_parse_of_flags(ranges[0]); 496 flags = pci_parse_of_flags(ranges[0]);
496 size = GET_64BIT(ranges, 6); 497 size = GET_64BIT(ranges, 6);
@@ -510,14 +511,14 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
510 if (flags & IORESOURCE_IO) { 511 if (flags & IORESOURCE_IO) {
511 res = bus->resource[0]; 512 res = bus->resource[0];
512 if (res->flags) { 513 if (res->flags) {
513 printk(KERN_ERR "PCI: ignoring extra I/O range" 514 pci_err(dev, "ignoring extra I/O range"
514 " for bridge %s\n", node->full_name); 515 " for bridge %s\n", node->full_name);
515 continue; 516 continue;
516 } 517 }
517 } else { 518 } else {
518 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { 519 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
519 printk(KERN_ERR "PCI: too many memory ranges" 520 pci_err(dev, "too many memory ranges"
520 " for bridge %s\n", node->full_name); 521 " for bridge %s\n", node->full_name);
521 continue; 522 continue;
522 } 523 }
523 res = bus->resource[i]; 524 res = bus->resource[i];
@@ -529,8 +530,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
529 region.end = region.start + size - 1; 530 region.end = region.start + size - 1;
530 531
531 if (ofpci_verbose) 532 if (ofpci_verbose)
532 printk(" Using flags[%08x] start[%016llx] size[%016llx]\n", 533 pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
533 flags, start, size); 534 flags, start, size);
534 535
535 pcibios_bus_to_resource(dev->bus, res, &region); 536 pcibios_bus_to_resource(dev->bus, res, &region);
536 } 537 }
@@ -538,7 +539,7 @@ after_ranges:
538 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 539 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
539 bus->number); 540 bus->number);
540 if (ofpci_verbose) 541 if (ofpci_verbose)
541 printk(" bus name: %s\n", bus->name); 542 pci_info(dev, " bus name: %s\n", bus->name);
542 543
543 pci_of_scan_bus(pbm, node, bus); 544 pci_of_scan_bus(pbm, node, bus);
544} 545}
@@ -553,14 +554,14 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
553 struct pci_dev *dev; 554 struct pci_dev *dev;
554 555
555 if (ofpci_verbose) 556 if (ofpci_verbose)
556 printk("PCI: scan_bus[%s] bus no %d\n", 557 pci_info(bus, "scan_bus[%s] bus no %d\n",
557 node->full_name, bus->number); 558 node->full_name, bus->number);
558 559
559 child = NULL; 560 child = NULL;
560 prev_devfn = -1; 561 prev_devfn = -1;
561 while ((child = of_get_next_child(node, child)) != NULL) { 562 while ((child = of_get_next_child(node, child)) != NULL) {
562 if (ofpci_verbose) 563 if (ofpci_verbose)
563 printk(" * %s\n", child->full_name); 564 pci_info(bus, " * %s\n", child->full_name);
564 reg = of_get_property(child, "reg", &reglen); 565 reg = of_get_property(child, "reg", &reglen);
565 if (reg == NULL || reglen < 20) 566 if (reg == NULL || reglen < 20)
566 continue; 567 continue;
@@ -581,8 +582,7 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
581 if (!dev) 582 if (!dev)
582 continue; 583 continue;
583 if (ofpci_verbose) 584 if (ofpci_verbose)
584 printk("PCI: dev header type: %x\n", 585 pci_info(dev, "dev header type: %x\n", dev->hdr_type);
585 dev->hdr_type);
586 586
587 if (pci_is_bridge(dev)) 587 if (pci_is_bridge(dev))
588 of_scan_pci_bridge(pbm, child, dev); 588 of_scan_pci_bridge(pbm, child, dev);
@@ -624,6 +624,45 @@ static void pci_bus_register_of_sysfs(struct pci_bus *bus)
624 pci_bus_register_of_sysfs(child_bus); 624 pci_bus_register_of_sysfs(child_bus);
625} 625}
626 626
627static void pci_claim_legacy_resources(struct pci_dev *dev)
628{
629 struct pci_bus_region region;
630 struct resource *p, *root, *conflict;
631
632 if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
633 return;
634
635 p = kzalloc(sizeof(*p), GFP_KERNEL);
636 if (!p)
637 return;
638
639 p->name = "Video RAM area";
640 p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
641
642 region.start = 0xa0000UL;
643 region.end = region.start + 0x1ffffUL;
644 pcibios_bus_to_resource(dev->bus, p, &region);
645
646 root = pci_find_parent_resource(dev, p);
647 if (!root) {
648 pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
649 goto err;
650 }
651
652 conflict = request_resource_conflict(root, p);
653 if (conflict) {
654 pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
655 p, conflict->name, conflict);
656 goto err;
657 }
658
659 pci_info(dev, "VGA legacy framebuffer %pR\n", p);
660 return;
661
662err:
663 kfree(p);
664}
665
627static void pci_claim_bus_resources(struct pci_bus *bus) 666static void pci_claim_bus_resources(struct pci_bus *bus)
628{ 667{
629 struct pci_bus *child_bus; 668 struct pci_bus *child_bus;
@@ -639,15 +678,13 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
639 continue; 678 continue;
640 679
641 if (ofpci_verbose) 680 if (ofpci_verbose)
642 printk("PCI: Claiming %s: " 681 pci_info(dev, "Claiming Resource %d: %pR\n",
643 "Resource %d: %016llx..%016llx [%x]\n", 682 i, r);
644 pci_name(dev), i,
645 (unsigned long long)r->start,
646 (unsigned long long)r->end,
647 (unsigned int)r->flags);
648 683
649 pci_claim_resource(dev, i); 684 pci_claim_resource(dev, i);
650 } 685 }
686
687 pci_claim_legacy_resources(dev);
651 } 688 }
652 689
653 list_for_each_entry(child_bus, &bus->children, node) 690 list_for_each_entry(child_bus, &bus->children, node)
@@ -687,6 +724,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
687 pci_bus_register_of_sysfs(bus); 724 pci_bus_register_of_sysfs(bus);
688 725
689 pci_claim_bus_resources(bus); 726 pci_claim_bus_resources(bus);
727
690 pci_bus_add_devices(bus); 728 pci_bus_add_devices(bus);
691 return bus; 729 return bus;
692} 730}
@@ -713,9 +751,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
713 } 751 }
714 752
715 if (cmd != oldcmd) { 753 if (cmd != oldcmd) {
716 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", 754 pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
717 pci_name(dev), cmd);
718 /* Enable the appropriate bits in the PCI command register. */
719 pci_write_config_word(dev, PCI_COMMAND, cmd); 755 pci_write_config_word(dev, PCI_COMMAND, cmd);
720 } 756 }
721 return 0; 757 return 0;
@@ -1075,8 +1111,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
1075 sp = prop->names; 1111 sp = prop->names;
1076 1112
1077 if (ofpci_verbose) 1113 if (ofpci_verbose)
1078 printk("PCI: Making slots for [%s] mask[0x%02x]\n", 1114 pci_info(bus, "Making slots for [%s] mask[0x%02x]\n",
1079 node->full_name, mask); 1115 node->full_name, mask);
1080 1116
1081 i = 0; 1117 i = 0;
1082 while (mask) { 1118 while (mask) {
@@ -1089,12 +1125,12 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
1089 } 1125 }
1090 1126
1091 if (ofpci_verbose) 1127 if (ofpci_verbose)
1092 printk("PCI: Making slot [%s]\n", sp); 1128 pci_info(bus, "Making slot [%s]\n", sp);
1093 1129
1094 pci_slot = pci_create_slot(bus, i, sp, NULL); 1130 pci_slot = pci_create_slot(bus, i, sp, NULL);
1095 if (IS_ERR(pci_slot)) 1131 if (IS_ERR(pci_slot))
1096 printk(KERN_ERR "PCI: pci_create_slot returned %ld\n", 1132 pci_err(bus, "pci_create_slot returned %ld\n",
1097 PTR_ERR(pci_slot)); 1133 PTR_ERR(pci_slot));
1098 1134
1099 sp += strlen(sp) + 1; 1135 sp += strlen(sp) + 1;
1100 mask &= ~this_bit; 1136 mask &= ~this_bit;
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 38d46bcc8634..4759ccd542fe 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -329,23 +329,6 @@ void pci_get_pbm_props(struct pci_pbm_info *pbm)
329 } 329 }
330} 330}
331 331
332static void pci_register_legacy_regions(struct resource *io_res,
333 struct resource *mem_res)
334{
335 struct resource *p;
336
337 /* VGA Video RAM. */
338 p = kzalloc(sizeof(*p), GFP_KERNEL);
339 if (!p)
340 return;
341
342 p->name = "Video RAM area";
343 p->start = mem_res->start + 0xa0000UL;
344 p->end = p->start + 0x1ffffUL;
345 p->flags = IORESOURCE_BUSY;
346 request_resource(mem_res, p);
347}
348
349static void pci_register_iommu_region(struct pci_pbm_info *pbm) 332static void pci_register_iommu_region(struct pci_pbm_info *pbm)
350{ 333{
351 const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", 334 const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma",
@@ -487,8 +470,6 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
487 if (pbm->mem64_space.flags) 470 if (pbm->mem64_space.flags)
488 request_resource(&iomem_resource, &pbm->mem64_space); 471 request_resource(&iomem_resource, &pbm->mem64_space);
489 472
490 pci_register_legacy_regions(&pbm->io_space,
491 &pbm->mem_space);
492 pci_register_iommu_region(pbm); 473 pci_register_iommu_region(pbm);
493} 474}
494 475
@@ -508,8 +489,8 @@ void pci_scan_for_target_abort(struct pci_pbm_info *pbm,
508 PCI_STATUS_REC_TARGET_ABORT)); 489 PCI_STATUS_REC_TARGET_ABORT));
509 if (error_bits) { 490 if (error_bits) {
510 pci_write_config_word(pdev, PCI_STATUS, error_bits); 491 pci_write_config_word(pdev, PCI_STATUS, error_bits);
511 printk("%s: Device %s saw Target Abort [%016x]\n", 492 pci_info(pdev, "%s: Device saw Target Abort [%016x]\n",
512 pbm->name, pci_name(pdev), status); 493 pbm->name, status);
513 } 494 }
514 } 495 }
515 496
@@ -531,8 +512,8 @@ void pci_scan_for_master_abort(struct pci_pbm_info *pbm,
531 (status & (PCI_STATUS_REC_MASTER_ABORT)); 512 (status & (PCI_STATUS_REC_MASTER_ABORT));
532 if (error_bits) { 513 if (error_bits) {
533 pci_write_config_word(pdev, PCI_STATUS, error_bits); 514 pci_write_config_word(pdev, PCI_STATUS, error_bits);
534 printk("%s: Device %s received Master Abort [%016x]\n", 515 pci_info(pdev, "%s: Device received Master Abort "
535 pbm->name, pci_name(pdev), status); 516 "[%016x]\n", pbm->name, status);
536 } 517 }
537 } 518 }
538 519
@@ -555,8 +536,8 @@ void pci_scan_for_parity_error(struct pci_pbm_info *pbm,
555 PCI_STATUS_DETECTED_PARITY)); 536 PCI_STATUS_DETECTED_PARITY));
556 if (error_bits) { 537 if (error_bits) {
557 pci_write_config_word(pdev, PCI_STATUS, error_bits); 538 pci_write_config_word(pdev, PCI_STATUS, error_bits);
558 printk("%s: Device %s saw Parity Error [%016x]\n", 539 pci_info(pdev, "%s: Device saw Parity Error [%016x]\n",
559 pbm->name, pci_name(pdev), status); 540 pbm->name, status);
560 } 541 }
561 } 542 }
562 543
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 1994d7638406..fb5899cbfa51 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -191,8 +191,8 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
191 break; 191 break;
192 } 192 }
193 if (i >= pbm->msi_num) { 193 if (i >= pbm->msi_num) {
194 printk(KERN_ERR "%s: teardown: No MSI for irq %u\n", 194 pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
195 pbm->name, irq); 195 irq);
196 return; 196 return;
197 } 197 }
198 198
@@ -201,9 +201,9 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
201 201
202 err = ops->msi_teardown(pbm, msi_num); 202 err = ops->msi_teardown(pbm, msi_num);
203 if (err) { 203 if (err) {
204 printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, " 204 pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
205 "irq %u, gives error %d\n", 205 "irq %u, gives error %d\n", pbm->name, msi_num, irq,
206 pbm->name, msi_num, irq, err); 206 err);
207 return; 207 return;
208 } 208 }
209 209
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 22f8774977d5..ee4c9a9a171c 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -518,10 +518,10 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
518 * board in a PCI slot. We must remap it 518 * board in a PCI slot. We must remap it
519 * under 64K but it is not done yet. XXX 519 * under 64K but it is not done yet. XXX
520 */ 520 */
521 printk("PCIC: Skipping I/O space at 0x%lx, " 521 pci_info(dev, "PCIC: Skipping I/O space at "
522 "this will Oops if a driver attaches " 522 "0x%lx, this will Oops if a driver "
523 "device '%s' at %02x:%02x)\n", address, 523 "attaches device '%s'\n", address,
524 namebuf, dev->bus->number, dev->devfn); 524 namebuf);
525 } 525 }
526 } 526 }
527 } 527 }
@@ -551,8 +551,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
551 p++; 551 p++;
552 } 552 }
553 if (i >= pcic->pcic_imdim) { 553 if (i >= pcic->pcic_imdim) {
554 printk("PCIC: device %s devfn %02x:%02x not found in %d\n", 554 pci_info(dev, "PCIC: device %s not found in %d\n", namebuf,
555 namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim); 555 pcic->pcic_imdim);
556 dev->irq = 0; 556 dev->irq = 0;
557 return; 557 return;
558 } 558 }
@@ -565,7 +565,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
565 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); 565 ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
566 real_irq = ivec >> ((i-4) << 2) & 0xF; 566 real_irq = ivec >> ((i-4) << 2) & 0xF;
567 } else { /* Corrupted map */ 567 } else { /* Corrupted map */
568 printk("PCIC: BAD PIN %d\n", i); for (;;) {} 568 pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {}
569 } 569 }
570/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ 570/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
571 571
@@ -574,10 +574,10 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
574 */ 574 */
575 if (real_irq == 0 || p->force) { 575 if (real_irq == 0 || p->force) {
576 if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ 576 if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
577 printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {} 577 pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
578 } 578 }
579 printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n", 579 pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq,
580 p->irq, p->pin, dev->bus->number, dev->devfn); 580 p->pin);
581 real_irq = p->irq; 581 real_irq = p->irq;
582 582
583 i = p->pin; 583 i = p->pin;
@@ -602,15 +602,13 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
602void pcibios_fixup_bus(struct pci_bus *bus) 602void pcibios_fixup_bus(struct pci_bus *bus)
603{ 603{
604 struct pci_dev *dev; 604 struct pci_dev *dev;
605 int i, has_io, has_mem;
606 unsigned int cmd = 0;
607 struct linux_pcic *pcic; 605 struct linux_pcic *pcic;
608 /* struct linux_pbm_info* pbm = &pcic->pbm; */ 606 /* struct linux_pbm_info* pbm = &pcic->pbm; */
609 int node; 607 int node;
610 struct pcidev_cookie *pcp; 608 struct pcidev_cookie *pcp;
611 609
612 if (!pcic0_up) { 610 if (!pcic0_up) {
613 printk("pcibios_fixup_bus: no PCIC\n"); 611 pci_info(bus, "pcibios_fixup_bus: no PCIC\n");
614 return; 612 return;
615 } 613 }
616 pcic = &pcic0; 614 pcic = &pcic0;
@@ -619,44 +617,12 @@ void pcibios_fixup_bus(struct pci_bus *bus)
619 * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus); 617 * Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
620 */ 618 */
621 if (bus->number != 0) { 619 if (bus->number != 0) {
622 printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number); 620 pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n",
621 bus->number);
623 return; 622 return;
624 } 623 }
625 624
626 list_for_each_entry(dev, &bus->devices, bus_list) { 625 list_for_each_entry(dev, &bus->devices, bus_list) {
627
628 /*
629 * Comment from i386 branch:
630 * There are buggy BIOSes that forget to enable I/O and memory
631 * access to PCI devices. We try to fix this, but we need to
632 * be sure that the BIOS didn't forget to assign an address
633 * to the device. [mj]
634 * OBP is a case of such BIOS :-)
635 */
636 has_io = has_mem = 0;
637 for(i=0; i<6; i++) {
638 unsigned long f = dev->resource[i].flags;
639 if (f & IORESOURCE_IO) {
640 has_io = 1;
641 } else if (f & IORESOURCE_MEM)
642 has_mem = 1;
643 }
644 pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
645 if (has_io && !(cmd & PCI_COMMAND_IO)) {
646 printk("PCIC: Enabling I/O for device %02x:%02x\n",
647 dev->bus->number, dev->devfn);
648 cmd |= PCI_COMMAND_IO;
649 pcic_write_config(dev->bus, dev->devfn,
650 PCI_COMMAND, 2, cmd);
651 }
652 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
653 printk("PCIC: Enabling memory for device %02x:%02x\n",
654 dev->bus->number, dev->devfn);
655 cmd |= PCI_COMMAND_MEMORY;
656 pcic_write_config(dev->bus, dev->devfn,
657 PCI_COMMAND, 2, cmd);
658 }
659
660 node = pdev_to_pnode(&pcic->pbm, dev); 626 node = pdev_to_pnode(&pcic->pbm, dev);
661 if(node == 0) 627 if(node == 0)
662 node = -1; 628 node = -1;
@@ -675,6 +641,34 @@ void pcibios_fixup_bus(struct pci_bus *bus)
675 } 641 }
676} 642}
677 643
644int pcibios_enable_device(struct pci_dev *dev, int mask)
645{
646 u16 cmd, oldcmd;
647 int i;
648
649 pci_read_config_word(dev, PCI_COMMAND, &cmd);
650 oldcmd = cmd;
651
652 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
653 struct resource *res = &dev->resource[i];
654
655 /* Only set up the requested stuff */
656 if (!(mask & (1<<i)))
657 continue;
658
659 if (res->flags & IORESOURCE_IO)
660 cmd |= PCI_COMMAND_IO;
661 if (res->flags & IORESOURCE_MEM)
662 cmd |= PCI_COMMAND_MEMORY;
663 }
664
665 if (cmd != oldcmd) {
666 pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
667 pci_write_config_word(dev, PCI_COMMAND, cmd);
668 }
669 return 0;
670}
671
678/* Makes compiler happy */ 672/* Makes compiler happy */
679static volatile int pcic_timer_dummy; 673static volatile int pcic_timer_dummy;
680 674
@@ -747,17 +741,11 @@ static void watchdog_reset() {
747} 741}
748#endif 742#endif
749 743
750int pcibios_enable_device(struct pci_dev *pdev, int mask)
751{
752 return 0;
753}
754
755/* 744/*
756 * NMI 745 * NMI
757 */ 746 */
758void pcic_nmi(unsigned int pend, struct pt_regs *regs) 747void pcic_nmi(unsigned int pend, struct pt_regs *regs)
759{ 748{
760
761 pend = swab32(pend); 749 pend = swab32(pend);
762 750
763 if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { 751 if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index f0114007e915..e5f753cbb1c3 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -59,24 +59,15 @@ int early_pci_allowed(void)
59 59
60void early_dump_pci_device(u8 bus, u8 slot, u8 func) 60void early_dump_pci_device(u8 bus, u8 slot, u8 func)
61{ 61{
62 u32 value[256 / 4];
62 int i; 63 int i;
63 int j;
64 u32 val;
65 64
66 printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:", 65 pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func);
67 bus, slot, func);
68 66
69 for (i = 0; i < 256; i += 4) { 67 for (i = 0; i < 256; i += 4)
70 if (!(i & 0x0f)) 68 value[i / 4] = read_pci_config(bus, slot, func, i);
71 printk("\n %02x:",i);
72 69
73 val = read_pci_config(bus, slot, func, i); 70 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false);
74 for (j = 0; j < 4; j++) {
75 printk(" %02x", val & 0xff);
76 val >>= 8;
77 }
78 }
79 printk("\n");
80} 71}
81 72
82void early_dump_pci_devices(void) 73void early_dump_pci_devices(void)
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 54ef19e90705..13f4485ca388 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -636,6 +636,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
636DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); 636DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
637DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); 637DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
638DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); 638DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
639DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
640DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
641DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
642DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
639 643
640#ifdef CONFIG_PHYS_ADDR_T_64BIT 644#ifdef CONFIG_PHYS_ADDR_T_64BIT
641 645
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 6ddf0a30c60d..883024054b05 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -20,8 +20,6 @@
20 20
21#define pcibios_assign_all_busses() 0 21#define pcibios_assign_all_busses() 0
22 22
23extern struct pci_controller* pcibios_alloc_controller(void);
24
25/* Assume some values. (We should revise them, if necessary) */ 23/* Assume some values. (We should revise them, if necessary) */
26 24
27#define PCIBIOS_MIN_IO 0x2000 25#define PCIBIOS_MIN_IO 0x2000
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b7c7a60c7000..21f13e9aabe1 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -41,8 +41,8 @@
41 * pci_bus_add_device 41 * pci_bus_add_device
42 */ 42 */
43 43
44struct pci_controller* pci_ctrl_head; 44static struct pci_controller *pci_ctrl_head;
45struct pci_controller** pci_ctrl_tail = &pci_ctrl_head; 45static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
46 46
47static int pci_bus_count; 47static int pci_bus_count;
48 48
@@ -80,50 +80,6 @@ pcibios_align_resource(void *data, const struct resource *res,
80 return start; 80 return start;
81} 81}
82 82
83int
84pcibios_enable_resources(struct pci_dev *dev, int mask)
85{
86 u16 cmd, old_cmd;
87 int idx;
88 struct resource *r;
89
90 pci_read_config_word(dev, PCI_COMMAND, &cmd);
91 old_cmd = cmd;
92 for(idx=0; idx<6; idx++) {
93 r = &dev->resource[idx];
94 if (!r->start && r->end) {
95 pr_err("PCI: Device %s not available because "
96 "of resource collisions\n", pci_name(dev));
97 return -EINVAL;
98 }
99 if (r->flags & IORESOURCE_IO)
100 cmd |= PCI_COMMAND_IO;
101 if (r->flags & IORESOURCE_MEM)
102 cmd |= PCI_COMMAND_MEMORY;
103 }
104 if (dev->resource[PCI_ROM_RESOURCE].start)
105 cmd |= PCI_COMMAND_MEMORY;
106 if (cmd != old_cmd) {
107 pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
108 pci_name(dev), old_cmd, cmd);
109 pci_write_config_word(dev, PCI_COMMAND, cmd);
110 }
111 return 0;
112}
113
114struct pci_controller * __init pcibios_alloc_controller(void)
115{
116 struct pci_controller *pci_ctrl;
117
118 pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
119 memset(pci_ctrl, 0, sizeof(struct pci_controller));
120
121 *pci_ctrl_tail = pci_ctrl;
122 pci_ctrl_tail = &pci_ctrl->next;
123
124 return pci_ctrl;
125}
126
127static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, 83static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
128 struct list_head *resources) 84 struct list_head *resources)
129{ 85{
@@ -223,8 +179,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
223 for (idx=0; idx<6; idx++) { 179 for (idx=0; idx<6; idx++) {
224 r = &dev->resource[idx]; 180 r = &dev->resource[idx];
225 if (!r->start && r->end) { 181 if (!r->start && r->end) {
226 pr_err("PCI: Device %s not available because " 182 pci_err(dev, "can't enable device: resource collisions\n");
227 "of resource collisions\n", pci_name(dev));
228 return -EINVAL; 183 return -EINVAL;
229 } 184 }
230 if (r->flags & IORESOURCE_IO) 185 if (r->flags & IORESOURCE_IO)
@@ -233,29 +188,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
233 cmd |= PCI_COMMAND_MEMORY; 188 cmd |= PCI_COMMAND_MEMORY;
234 } 189 }
235 if (cmd != old_cmd) { 190 if (cmd != old_cmd) {
236 pr_info("PCI: Enabling device %s (%04x -> %04x)\n", 191 pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
237 pci_name(dev), old_cmd, cmd);
238 pci_write_config_word(dev, PCI_COMMAND, cmd); 192 pci_write_config_word(dev, PCI_COMMAND, cmd);
239 } 193 }
240 194
241 return 0; 195 return 0;
242} 196}
243 197
244#ifdef CONFIG_PROC_FS
245
246/*
247 * Return the index of the PCI controller for device pdev.
248 */
249
250int
251pci_controller_num(struct pci_dev *dev)
252{
253 struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
254 return pci_ctrl->index;
255}
256
257#endif /* CONFIG_PROC_FS */
258
259/* 198/*
260 * Platform support for /proc/bus/pci/X/Y mmap()s. 199 * Platform support for /proc/bus/pci/X/Y mmap()s.
261 * -- paulus. 200 * -- paulus.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0da18bde6a16..7433035ded95 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -153,6 +153,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
153 { OSC_PCI_EXPRESS_PME_CONTROL, "PME" }, 153 { OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
154 { OSC_PCI_EXPRESS_AER_CONTROL, "AER" }, 154 { OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
155 { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" }, 155 { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
156 { OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
156}; 157};
157 158
158static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, 159static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
@@ -472,9 +473,17 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
472 } 473 }
473 474
474 control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL 475 control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
475 | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
476 | OSC_PCI_EXPRESS_PME_CONTROL; 476 | OSC_PCI_EXPRESS_PME_CONTROL;
477 477
478 if (IS_ENABLED(CONFIG_PCIEASPM))
479 control |= OSC_PCI_EXPRESS_LTR_CONTROL;
480
481 if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
482 control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
483
484 if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
485 control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
486
478 if (pci_aer_available()) { 487 if (pci_aer_available()) {
479 if (aer_acpi_firmware_first()) 488 if (aer_acpi_firmware_first())
480 dev_info(&device->dev, 489 dev_info(&device->dev,
@@ -900,11 +909,15 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
900 909
901 host_bridge = to_pci_host_bridge(bus->bridge); 910 host_bridge = to_pci_host_bridge(bus->bridge);
902 if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)) 911 if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
903 host_bridge->native_hotplug = 0; 912 host_bridge->native_pcie_hotplug = 0;
913 if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL))
914 host_bridge->native_shpc_hotplug = 0;
904 if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL)) 915 if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
905 host_bridge->native_aer = 0; 916 host_bridge->native_aer = 0;
906 if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL)) 917 if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
907 host_bridge->native_pme = 0; 918 host_bridge->native_pme = 0;
919 if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
920 host_bridge->native_ltr = 0;
908 921
909 pci_scan_child_bus(bus); 922 pci_scan_child_bus(bus);
910 pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info, 923 pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index c1c982908b4b..bf601c7629fb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -56,11 +56,6 @@
56#include "chip_registers.h" 56#include "chip_registers.h"
57#include "aspm.h" 57#include "aspm.h"
58 58
59/* link speed vector for Gen3 speed - not in Linux headers */
60#define GEN1_SPEED_VECTOR 0x1
61#define GEN2_SPEED_VECTOR 0x2
62#define GEN3_SPEED_VECTOR 0x3
63
64/* 59/*
65 * This file contains PCIe utility routines. 60 * This file contains PCIe utility routines.
66 */ 61 */
@@ -262,7 +257,7 @@ static u32 extract_speed(u16 linkstat)
262 case PCI_EXP_LNKSTA_CLS_5_0GB: 257 case PCI_EXP_LNKSTA_CLS_5_0GB:
263 speed = 5000; /* Gen 2, 5GHz */ 258 speed = 5000; /* Gen 2, 5GHz */
264 break; 259 break;
265 case GEN3_SPEED_VECTOR: 260 case PCI_EXP_LNKSTA_CLS_8_0GB:
266 speed = 8000; /* Gen 3, 8GHz */ 261 speed = 8000; /* Gen 3, 8GHz */
267 break; 262 break;
268 } 263 }
@@ -317,7 +312,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
317 return ret; 312 return ret;
318 } 313 }
319 314
320 if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) { 315 if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
321 dd_dev_info(dd, 316 dd_dev_info(dd,
322 "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n", 317 "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
323 linkcap & PCI_EXP_LNKCAP_SLS); 318 linkcap & PCI_EXP_LNKCAP_SLS);
@@ -694,9 +689,6 @@ const struct pci_error_handlers hfi1_pci_err_handler = {
694/* gasket block secondary bus reset delay */ 689/* gasket block secondary bus reset delay */
695#define SBR_DELAY_US 200000 /* 200ms */ 690#define SBR_DELAY_US 200000 /* 200ms */
696 691
697/* mask for PCIe capability register lnkctl2 target link speed */
698#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
699
700static uint pcie_target = 3; 692static uint pcie_target = 3;
701module_param(pcie_target, uint, S_IRUGO); 693module_param(pcie_target, uint, S_IRUGO);
702MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)"); 694MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
@@ -1045,13 +1037,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
1045 return 0; 1037 return 0;
1046 1038
1047 if (pcie_target == 1) { /* target Gen1 */ 1039 if (pcie_target == 1) { /* target Gen1 */
1048 target_vector = GEN1_SPEED_VECTOR; 1040 target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
1049 target_speed = 2500; 1041 target_speed = 2500;
1050 } else if (pcie_target == 2) { /* target Gen2 */ 1042 } else if (pcie_target == 2) { /* target Gen2 */
1051 target_vector = GEN2_SPEED_VECTOR; 1043 target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
1052 target_speed = 5000; 1044 target_speed = 5000;
1053 } else if (pcie_target == 3) { /* target Gen3 */ 1045 } else if (pcie_target == 3) { /* target Gen3 */
1054 target_vector = GEN3_SPEED_VECTOR; 1046 target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
1055 target_speed = 8000; 1047 target_speed = 8000;
1056 } else { 1048 } else {
1057 /* off or invalid target - skip */ 1049 /* off or invalid target - skip */
@@ -1290,8 +1282,8 @@ retry:
1290 dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, 1282 dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
1291 (u32)lnkctl2); 1283 (u32)lnkctl2);
1292 /* only write to parent if target is not as high as ours */ 1284 /* only write to parent if target is not as high as ours */
1293 if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) { 1285 if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
1294 lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK; 1286 lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
1295 lnkctl2 |= target_vector; 1287 lnkctl2 |= target_vector;
1296 dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, 1288 dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
1297 (u32)lnkctl2); 1289 (u32)lnkctl2);
@@ -1316,7 +1308,7 @@ retry:
1316 1308
1317 dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, 1309 dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
1318 (u32)lnkctl2); 1310 (u32)lnkctl2);
1319 lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK; 1311 lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
1320 lnkctl2 |= target_vector; 1312 lnkctl2 |= target_vector;
1321 dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, 1313 dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
1322 (u32)lnkctl2); 1314 (u32)lnkctl2);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8fb8c737fffe..d60c7dc62905 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -354,6 +354,9 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
354 }; 354 };
355 int i, pos; 355 int i, pos;
356 356
357 if (pci_ats_disabled())
358 return false;
359
357 for (i = 0; i < 3; ++i) { 360 for (i = 0; i < 3; ++i) {
358 pos = pci_find_ext_capability(pdev, caps[i]); 361 pos = pci_find_ext_capability(pdev, caps[i]);
359 if (pos == 0) 362 if (pos == 0)
@@ -3523,9 +3526,11 @@ int amd_iommu_device_info(struct pci_dev *pdev,
3523 3526
3524 memset(info, 0, sizeof(*info)); 3527 memset(info, 0, sizeof(*info));
3525 3528
3526 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS); 3529 if (!pci_ats_disabled()) {
3527 if (pos) 3530 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3528 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; 3531 if (pos)
3532 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3533 }
3529 3534
3530 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); 3535 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3531 if (pos) 3536 if (pos)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..772b404a6604 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2459,7 +2459,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2459 if (dev && dev_is_pci(dev)) { 2459 if (dev && dev_is_pci(dev)) {
2460 struct pci_dev *pdev = to_pci_dev(info->dev); 2460 struct pci_dev *pdev = to_pci_dev(info->dev);
2461 2461
2462 if (ecap_dev_iotlb_support(iommu->ecap) && 2462 if (!pci_ats_disabled() &&
2463 ecap_dev_iotlb_support(iommu->ecap) &&
2463 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) && 2464 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2464 dmar_find_matched_atsr_unit(pdev)) 2465 dmar_find_matched_atsr_unit(pdev))
2465 info->ats_supported = 1; 2466 info->ats_supported = 1;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index fe8897e64635..7b370466a227 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -203,7 +203,7 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
203 if (!val) 203 if (!val)
204 return false; 204 return false;
205 205
206 if (test->last_irq - pdev->irq == msi_num - 1) 206 if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
207 return true; 207 return true;
208 208
209 return false; 209 return false;
@@ -233,7 +233,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
233 orig_src_addr = dma_alloc_coherent(dev, size + alignment, 233 orig_src_addr = dma_alloc_coherent(dev, size + alignment,
234 &orig_src_phys_addr, GFP_KERNEL); 234 &orig_src_phys_addr, GFP_KERNEL);
235 if (!orig_src_addr) { 235 if (!orig_src_addr) {
236 dev_err(dev, "failed to allocate source buffer\n"); 236 dev_err(dev, "Failed to allocate source buffer\n");
237 ret = false; 237 ret = false;
238 goto err; 238 goto err;
239 } 239 }
@@ -259,7 +259,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
259 orig_dst_addr = dma_alloc_coherent(dev, size + alignment, 259 orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
260 &orig_dst_phys_addr, GFP_KERNEL); 260 &orig_dst_phys_addr, GFP_KERNEL);
261 if (!orig_dst_addr) { 261 if (!orig_dst_addr) {
262 dev_err(dev, "failed to allocate destination address\n"); 262 dev_err(dev, "Failed to allocate destination address\n");
263 ret = false; 263 ret = false;
264 goto err_orig_src_addr; 264 goto err_orig_src_addr;
265 } 265 }
@@ -321,7 +321,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
321 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr, 321 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
322 GFP_KERNEL); 322 GFP_KERNEL);
323 if (!orig_addr) { 323 if (!orig_addr) {
324 dev_err(dev, "failed to allocate address\n"); 324 dev_err(dev, "Failed to allocate address\n");
325 ret = false; 325 ret = false;
326 goto err; 326 goto err;
327 } 327 }
@@ -382,7 +382,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
382 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr, 382 orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
383 GFP_KERNEL); 383 GFP_KERNEL);
384 if (!orig_addr) { 384 if (!orig_addr) {
385 dev_err(dev, "failed to allocate destination address\n"); 385 dev_err(dev, "Failed to allocate destination address\n");
386 ret = false; 386 ret = false;
387 goto err; 387 goto err;
388 } 388 }
@@ -513,31 +513,31 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
513 if (!no_msi) { 513 if (!no_msi) {
514 irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); 514 irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
515 if (irq < 0) 515 if (irq < 0)
516 dev_err(dev, "failed to get MSI interrupts\n"); 516 dev_err(dev, "Failed to get MSI interrupts\n");
517 test->num_irqs = irq; 517 test->num_irqs = irq;
518 } 518 }
519 519
520 err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler, 520 err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
521 IRQF_SHARED, DRV_MODULE_NAME, test); 521 IRQF_SHARED, DRV_MODULE_NAME, test);
522 if (err) { 522 if (err) {
523 dev_err(dev, "failed to request IRQ %d\n", pdev->irq); 523 dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
524 goto err_disable_msi; 524 goto err_disable_msi;
525 } 525 }
526 526
527 for (i = 1; i < irq; i++) { 527 for (i = 1; i < irq; i++) {
528 err = devm_request_irq(dev, pdev->irq + i, 528 err = devm_request_irq(dev, pci_irq_vector(pdev, i),
529 pci_endpoint_test_irqhandler, 529 pci_endpoint_test_irqhandler,
530 IRQF_SHARED, DRV_MODULE_NAME, test); 530 IRQF_SHARED, DRV_MODULE_NAME, test);
531 if (err) 531 if (err)
532 dev_err(dev, "failed to request IRQ %d for MSI %d\n", 532 dev_err(dev, "failed to request IRQ %d for MSI %d\n",
533 pdev->irq + i, i + 1); 533 pci_irq_vector(pdev, i), i + 1);
534 } 534 }
535 535
536 for (bar = BAR_0; bar <= BAR_5; bar++) { 536 for (bar = BAR_0; bar <= BAR_5; bar++) {
537 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 537 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
538 base = pci_ioremap_bar(pdev, bar); 538 base = pci_ioremap_bar(pdev, bar);
539 if (!base) { 539 if (!base) {
540 dev_err(dev, "failed to read BAR%d\n", bar); 540 dev_err(dev, "Failed to read BAR%d\n", bar);
541 WARN_ON(bar == test_reg_bar); 541 WARN_ON(bar == test_reg_bar);
542 } 542 }
543 test->bar[bar] = base; 543 test->bar[bar] = base;
@@ -557,7 +557,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
557 id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); 557 id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
558 if (id < 0) { 558 if (id < 0) {
559 err = id; 559 err = id;
560 dev_err(dev, "unable to get id\n"); 560 dev_err(dev, "Unable to get id\n");
561 goto err_iounmap; 561 goto err_iounmap;
562 } 562 }
563 563
@@ -573,7 +573,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
573 573
574 err = misc_register(misc_device); 574 err = misc_register(misc_device);
575 if (err) { 575 if (err) {
576 dev_err(dev, "failed to register device\n"); 576 dev_err(dev, "Failed to register device\n");
577 goto err_kfree_name; 577 goto err_kfree_name;
578 } 578 }
579 579
@@ -592,7 +592,7 @@ err_iounmap:
592 } 592 }
593 593
594 for (i = 0; i < irq; i++) 594 for (i = 0; i < irq; i++)
595 devm_free_irq(dev, pdev->irq + i, test); 595 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
596 596
597err_disable_msi: 597err_disable_msi:
598 pci_disable_msi(pdev); 598 pci_disable_msi(pdev);
@@ -625,7 +625,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
625 pci_iounmap(pdev, test->bar[bar]); 625 pci_iounmap(pdev, test->bar[bar]);
626 } 626 }
627 for (i = 0; i < test->num_irqs; i++) 627 for (i = 0; i < test->num_irqs; i++)
628 devm_free_irq(&pdev->dev, pdev->irq + i, test); 628 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
629 pci_disable_msi(pdev); 629 pci_disable_msi(pdev);
630 pci_release_regions(pdev); 630 pci_release_regions(pdev);
631 pci_disable_device(pdev); 631 pci_disable_device(pdev);
@@ -634,6 +634,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
634static const struct pci_device_id pci_endpoint_test_tbl[] = { 634static const struct pci_device_id pci_endpoint_test_tbl[] = {
635 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, 635 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
636 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, 636 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
637 { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
637 { } 638 { }
638}; 639};
639MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); 640MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a822e70c2af3..f2af87d70594 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3386,32 +3386,6 @@ err_disable_device:
3386} 3386}
3387 3387
3388/*****************************************************************************/ 3388/*****************************************************************************/
3389static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
3390{
3391 int rc;
3392
3393 if (numvfs > 0) {
3394 rc = pci_enable_sriov(dev, numvfs);
3395 if (rc != 0) {
3396 dev_err(&dev->dev,
3397 "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
3398 numvfs, rc);
3399 return rc;
3400 }
3401
3402 return numvfs;
3403 }
3404
3405 if (numvfs == 0) {
3406 pci_disable_sriov(dev);
3407 return 0;
3408 }
3409
3410 return -EINVAL;
3411}
3412
3413/*****************************************************************************/
3414/*****************************************************************************/
3415 3389
3416/* ena_remove - Device Removal Routine 3390/* ena_remove - Device Removal Routine
3417 * @pdev: PCI device information struct 3391 * @pdev: PCI device information struct
@@ -3526,7 +3500,7 @@ static struct pci_driver ena_pci_driver = {
3526 .suspend = ena_suspend, 3500 .suspend = ena_suspend,
3527 .resume = ena_resume, 3501 .resume = ena_resume,
3528#endif 3502#endif
3529 .sriov_configure = ena_sriov_configure, 3503 .sriov_configure = pci_sriov_configure_simple,
3530}; 3504};
3531 3505
3532static int __init ena_init(void) 3506static int __init ena_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c766ae23bc74..5b1ed240bf18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13922,8 +13922,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
13922{ 13922{
13923 struct net_device *dev = NULL; 13923 struct net_device *dev = NULL;
13924 struct bnx2x *bp; 13924 struct bnx2x *bp;
13925 enum pcie_link_width pcie_width;
13926 enum pci_bus_speed pcie_speed;
13927 int rc, max_non_def_sbs; 13925 int rc, max_non_def_sbs;
13928 int rx_count, tx_count, rss_count, doorbell_size; 13926 int rx_count, tx_count, rss_count, doorbell_size;
13929 int max_cos_est; 13927 int max_cos_est;
@@ -14091,21 +14089,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
14091 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 14089 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14092 rtnl_unlock(); 14090 rtnl_unlock();
14093 } 14091 }
14094 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || 14092 BNX2X_DEV_INFO(
14095 pcie_speed == PCI_SPEED_UNKNOWN || 14093 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
14096 pcie_width == PCIE_LNK_WIDTH_UNKNOWN) 14094 board_info[ent->driver_data].name,
14097 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); 14095 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14098 else 14096 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14099 BNX2X_DEV_INFO( 14097 pcie_print_link_status(bp->pdev);
14100 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
14101 board_info[ent->driver_data].name,
14102 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14103 pcie_width,
14104 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
14105 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
14106 pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
14107 "Unknown",
14108 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14109 14098
14110 bnx2x_register_phc(bp); 14099 bnx2x_register_phc(bp);
14111 14100
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dfa0839f6656..176fc9f4d7de 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8685,22 +8685,6 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
8685 return rc; 8685 return rc;
8686} 8686}
8687 8687
8688static void bnxt_parse_log_pcie_link(struct bnxt *bp)
8689{
8690 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
8691 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
8692
8693 if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
8694 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
8695 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
8696 else
8697 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
8698 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
8699 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
8700 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
8701 "Unknown", width);
8702}
8703
8704static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8688static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8705{ 8689{
8706 static int version_printed; 8690 static int version_printed;
@@ -8915,8 +8899,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8915 netdev_info(dev, "%s found at mem %lx, node addr %pM\n", 8899 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
8916 board_info[ent->driver_data].name, 8900 board_info[ent->driver_data].name,
8917 (long)pci_resource_start(pdev, 0), dev->dev_addr); 8901 (long)pci_resource_start(pdev, 0), dev->dev_addr);
8918 8902 pcie_print_link_status(pdev);
8919 bnxt_parse_log_pcie_link(bp);
8920 8903
8921 return 0; 8904 return 0;
8922 8905
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0efae2030e71..35cb3ae4f7b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5066,79 +5066,6 @@ static int init_rss(struct adapter *adap)
5066 return 0; 5066 return 0;
5067} 5067}
5068 5068
5069static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
5070 enum pci_bus_speed *speed,
5071 enum pcie_link_width *width)
5072{
5073 u32 lnkcap1, lnkcap2;
5074 int err1, err2;
5075
5076#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
5077
5078 *speed = PCI_SPEED_UNKNOWN;
5079 *width = PCIE_LNK_WIDTH_UNKNOWN;
5080
5081 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
5082 &lnkcap1);
5083 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
5084 &lnkcap2);
5085 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
5086 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5087 *speed = PCIE_SPEED_8_0GT;
5088 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5089 *speed = PCIE_SPEED_5_0GT;
5090 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5091 *speed = PCIE_SPEED_2_5GT;
5092 }
5093 if (!err1) {
5094 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
5095 if (!lnkcap2) { /* pre-r3.0 */
5096 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
5097 *speed = PCIE_SPEED_5_0GT;
5098 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
5099 *speed = PCIE_SPEED_2_5GT;
5100 }
5101 }
5102
5103 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5104 return err1 ? err1 : err2 ? err2 : -EINVAL;
5105 return 0;
5106}
5107
5108static void cxgb4_check_pcie_caps(struct adapter *adap)
5109{
5110 enum pcie_link_width width, width_cap;
5111 enum pci_bus_speed speed, speed_cap;
5112
5113#define PCIE_SPEED_STR(speed) \
5114 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
5115 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
5116 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
5117 "Unknown")
5118
5119 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
5120 dev_warn(adap->pdev_dev,
5121 "Unable to determine PCIe device BW capabilities\n");
5122 return;
5123 }
5124
5125 if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
5126 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
5127 dev_warn(adap->pdev_dev,
5128 "Unable to determine PCI Express bandwidth.\n");
5129 return;
5130 }
5131
5132 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
5133 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
5134 dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
5135 width, width_cap);
5136 if (speed < speed_cap || width < width_cap)
5137 dev_info(adap->pdev_dev,
5138 "A slot with more lanes and/or higher speed is "
5139 "suggested for optimal performance.\n");
5140}
5141
5142/* Dump basic information about the adapter */ 5069/* Dump basic information about the adapter */
5143static void print_adapter_info(struct adapter *adapter) 5070static void print_adapter_info(struct adapter *adapter)
5144{ 5071{
@@ -5798,7 +5725,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5798 } 5725 }
5799 5726
5800 /* check for PCI Express bandwidth capabiltites */ 5727 /* check for PCI Express bandwidth capabiltites */
5801 cxgb4_check_pcie_caps(adapter); 5728 pcie_print_link_status(pdev);
5802 5729
5803 err = init_rss(adapter); 5730 err = init_rss(adapter);
5804 if (err) 5731 if (err)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 38b4e4899490..4929f7265598 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -245,9 +245,6 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
245 int expected_gts) 245 int expected_gts)
246{ 246{
247 struct ixgbe_hw *hw = &adapter->hw; 247 struct ixgbe_hw *hw = &adapter->hw;
248 int max_gts = 0;
249 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
250 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
251 struct pci_dev *pdev; 248 struct pci_dev *pdev;
252 249
253 /* Some devices are not connected over PCIe and thus do not negotiate 250 /* Some devices are not connected over PCIe and thus do not negotiate
@@ -263,49 +260,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
263 else 260 else
264 pdev = adapter->pdev; 261 pdev = adapter->pdev;
265 262
266 if (pcie_get_minimum_link(pdev, &speed, &width) || 263 pcie_print_link_status(pdev);
267 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
268 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
269 return;
270 }
271
272 switch (speed) {
273 case PCIE_SPEED_2_5GT:
274 /* 8b/10b encoding reduces max throughput by 20% */
275 max_gts = 2 * width;
276 break;
277 case PCIE_SPEED_5_0GT:
278 /* 8b/10b encoding reduces max throughput by 20% */
279 max_gts = 4 * width;
280 break;
281 case PCIE_SPEED_8_0GT:
282 /* 128b/130b encoding reduces throughput by less than 2% */
283 max_gts = 8 * width;
284 break;
285 default:
286 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
287 return;
288 }
289
290 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
291 max_gts);
292 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
293 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
294 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
295 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
296 "Unknown"),
297 width,
298 (speed == PCIE_SPEED_2_5GT ? "20%" :
299 speed == PCIE_SPEED_5_0GT ? "20%" :
300 speed == PCIE_SPEED_8_0GT ? "<2%" :
301 "Unknown"));
302
303 if (max_gts < expected_gts) {
304 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
305 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
306 expected_gts);
307 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
308 }
309} 264}
310 265
311static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) 266static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e526437bacbf..d234de5505ea 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2630,24 +2630,6 @@ static void nvme_remove(struct pci_dev *pdev)
2630 nvme_put_ctrl(&dev->ctrl); 2630 nvme_put_ctrl(&dev->ctrl);
2631} 2631}
2632 2632
2633static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
2634{
2635 int ret = 0;
2636
2637 if (numvfs == 0) {
2638 if (pci_vfs_assigned(pdev)) {
2639 dev_warn(&pdev->dev,
2640 "Cannot disable SR-IOV VFs while assigned\n");
2641 return -EPERM;
2642 }
2643 pci_disable_sriov(pdev);
2644 return 0;
2645 }
2646
2647 ret = pci_enable_sriov(pdev, numvfs);
2648 return ret ? ret : numvfs;
2649}
2650
2651#ifdef CONFIG_PM_SLEEP 2633#ifdef CONFIG_PM_SLEEP
2652static int nvme_suspend(struct device *dev) 2634static int nvme_suspend(struct device *dev)
2653{ 2635{
@@ -2774,7 +2756,7 @@ static struct pci_driver nvme_driver = {
2774 .driver = { 2756 .driver = {
2775 .pm = &nvme_dev_pm_ops, 2757 .pm = &nvme_dev_pm_ops,
2776 }, 2758 },
2777 .sriov_configure = nvme_pci_sriov_configure, 2759 .sriov_configure = pci_sriov_configure_simple,
2778 .err_handler = &nvme_err_handler, 2760 .err_handler = &nvme_err_handler,
2779}; 2761};
2780 2762
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 29a487f31dae..b2f07635e94d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,6 +67,18 @@ config PCI_STUB
67 67
68 When in doubt, say N. 68 When in doubt, say N.
69 69
70config PCI_PF_STUB
71 tristate "PCI PF Stub driver"
72 depends on PCI
73 depends on PCI_IOV
74 help
75 Say Y or M here if you want to enable support for devices that
76 require SR-IOV support, while at the same time the PF itself is
77 not providing any actual services on the host itself such as
78 storage or networking.
79
80 When in doubt, say N.
81
70config XEN_PCIDEV_FRONTEND 82config XEN_PCIDEV_FRONTEND
71 tristate "Xen PCI Frontend" 83 tristate "Xen PCI Frontend"
72 depends on PCI && X86 && XEN 84 depends on PCI && X86 && XEN
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 952addc7bacf..84c9eef6b1c3 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PCI_LABEL) += pci-label.o
24obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o 24obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
25obj-$(CONFIG_PCI_SYSCALL) += syscall.o 25obj-$(CONFIG_PCI_SYSCALL) += syscall.o
26obj-$(CONFIG_PCI_STUB) += pci-stub.o 26obj-$(CONFIG_PCI_STUB) += pci-stub.o
27obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
27obj-$(CONFIG_PCI_ECAM) += ecam.o 28obj-$(CONFIG_PCI_ECAM) += ecam.o
28obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o 29obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
29 30
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 89305b569d3d..4923a2a8e14b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -20,6 +20,9 @@ void pci_ats_init(struct pci_dev *dev)
20{ 20{
21 int pos; 21 int pos;
22 22
23 if (pci_ats_disabled())
24 return;
25
23 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); 26 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
24 if (!pos) 27 if (!pos)
25 return; 28 return;
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 2f3f5c50aa48..16f52c626b4b 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -1,13 +1,13 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2 2
3menu "DesignWare PCI Core Support" 3menu "DesignWare PCI Core Support"
4 depends on PCI
4 5
5config PCIE_DW 6config PCIE_DW
6 bool 7 bool
7 8
8config PCIE_DW_HOST 9config PCIE_DW_HOST
9 bool 10 bool
10 depends on PCI
11 depends on PCI_MSI_IRQ_DOMAIN 11 depends on PCI_MSI_IRQ_DOMAIN
12 select PCIE_DW 12 select PCIE_DW
13 13
@@ -22,7 +22,7 @@ config PCI_DRA7XX
22config PCI_DRA7XX_HOST 22config PCI_DRA7XX_HOST
23 bool "TI DRA7xx PCIe controller Host Mode" 23 bool "TI DRA7xx PCIe controller Host Mode"
24 depends on SOC_DRA7XX || COMPILE_TEST 24 depends on SOC_DRA7XX || COMPILE_TEST
25 depends on PCI && PCI_MSI_IRQ_DOMAIN 25 depends on PCI_MSI_IRQ_DOMAIN
26 depends on OF && HAS_IOMEM && TI_PIPE3 26 depends on OF && HAS_IOMEM && TI_PIPE3
27 select PCIE_DW_HOST 27 select PCIE_DW_HOST
28 select PCI_DRA7XX 28 select PCI_DRA7XX
@@ -51,50 +51,62 @@ config PCI_DRA7XX_EP
51 This uses the DesignWare core. 51 This uses the DesignWare core.
52 52
53config PCIE_DW_PLAT 53config PCIE_DW_PLAT
54 bool "Platform bus based DesignWare PCIe Controller" 54 bool
55 depends on PCI
56 depends on PCI_MSI_IRQ_DOMAIN
57 select PCIE_DW_HOST
58 ---help---
59 This selects the DesignWare PCIe controller support. Select this if
60 you have a PCIe controller on Platform bus.
61 55
62 If you have a controller with this interface, say Y or M here. 56config PCIE_DW_PLAT_HOST
57 bool "Platform bus based DesignWare PCIe Controller - Host mode"
58 depends on PCI && PCI_MSI_IRQ_DOMAIN
59 select PCIE_DW_HOST
60 select PCIE_DW_PLAT
61 default y
62 help
63 Enables support for the PCIe controller in the Designware IP to
64 work in host mode. There are two instances of PCIe controller in
65 Designware IP.
66 This controller can work either as EP or RC. In order to enable
67 host-specific features PCIE_DW_PLAT_HOST must be selected and in
68 order to enable device-specific features PCI_DW_PLAT_EP must be
69 selected.
63 70
64 If unsure, say N. 71config PCIE_DW_PLAT_EP
72 bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
73 depends on PCI && PCI_MSI_IRQ_DOMAIN
74 depends on PCI_ENDPOINT
75 select PCIE_DW_EP
76 select PCIE_DW_PLAT
77 help
78 Enables support for the PCIe controller in the Designware IP to
79 work in endpoint mode. There are two instances of PCIe controller
80 in Designware IP.
81 This controller can work either as EP or RC. In order to enable
82 host-specific features PCIE_DW_PLAT_HOST must be selected and in
83 order to enable device-specific features PCI_DW_PLAT_EP must be
84 selected.
65 85
66config PCI_EXYNOS 86config PCI_EXYNOS
67 bool "Samsung Exynos PCIe controller" 87 bool "Samsung Exynos PCIe controller"
68 depends on PCI 88 depends on SOC_EXYNOS5440 || COMPILE_TEST
69 depends on SOC_EXYNOS5440
70 depends on PCI_MSI_IRQ_DOMAIN 89 depends on PCI_MSI_IRQ_DOMAIN
71 select PCIEPORTBUS
72 select PCIE_DW_HOST 90 select PCIE_DW_HOST
73 91
74config PCI_IMX6 92config PCI_IMX6
75 bool "Freescale i.MX6 PCIe controller" 93 bool "Freescale i.MX6 PCIe controller"
76 depends on PCI 94 depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
77 depends on SOC_IMX6Q
78 depends on PCI_MSI_IRQ_DOMAIN 95 depends on PCI_MSI_IRQ_DOMAIN
79 select PCIEPORTBUS
80 select PCIE_DW_HOST 96 select PCIE_DW_HOST
81 97
82config PCIE_SPEAR13XX 98config PCIE_SPEAR13XX
83 bool "STMicroelectronics SPEAr PCIe controller" 99 bool "STMicroelectronics SPEAr PCIe controller"
84 depends on PCI 100 depends on ARCH_SPEAR13XX || COMPILE_TEST
85 depends on ARCH_SPEAR13XX
86 depends on PCI_MSI_IRQ_DOMAIN 101 depends on PCI_MSI_IRQ_DOMAIN
87 select PCIEPORTBUS
88 select PCIE_DW_HOST 102 select PCIE_DW_HOST
89 help 103 help
90 Say Y here if you want PCIe support on SPEAr13XX SoCs. 104 Say Y here if you want PCIe support on SPEAr13XX SoCs.
91 105
92config PCI_KEYSTONE 106config PCI_KEYSTONE
93 bool "TI Keystone PCIe controller" 107 bool "TI Keystone PCIe controller"
94 depends on PCI 108 depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
95 depends on ARCH_KEYSTONE
96 depends on PCI_MSI_IRQ_DOMAIN 109 depends on PCI_MSI_IRQ_DOMAIN
97 select PCIEPORTBUS
98 select PCIE_DW_HOST 110 select PCIE_DW_HOST
99 help 111 help
100 Say Y here if you want to enable PCI controller support on Keystone 112 Say Y here if you want to enable PCI controller support on Keystone
@@ -104,8 +116,7 @@ config PCI_KEYSTONE
104 116
105config PCI_LAYERSCAPE 117config PCI_LAYERSCAPE
106 bool "Freescale Layerscape PCIe controller" 118 bool "Freescale Layerscape PCIe controller"
107 depends on PCI 119 depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
108 depends on OF && (ARM || ARCH_LAYERSCAPE)
109 depends on PCI_MSI_IRQ_DOMAIN 120 depends on PCI_MSI_IRQ_DOMAIN
110 select MFD_SYSCON 121 select MFD_SYSCON
111 select PCIE_DW_HOST 122 select PCIE_DW_HOST
@@ -113,11 +124,9 @@ config PCI_LAYERSCAPE
113 Say Y here if you want PCIe controller support on Layerscape SoCs. 124 Say Y here if you want PCIe controller support on Layerscape SoCs.
114 125
115config PCI_HISI 126config PCI_HISI
116 depends on OF && ARM64 127 depends on OF && (ARM64 || COMPILE_TEST)
117 bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" 128 bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
118 depends on PCI
119 depends on PCI_MSI_IRQ_DOMAIN 129 depends on PCI_MSI_IRQ_DOMAIN
120 select PCIEPORTBUS
121 select PCIE_DW_HOST 130 select PCIE_DW_HOST
122 select PCI_HOST_COMMON 131 select PCI_HOST_COMMON
123 help 132 help
@@ -126,10 +135,8 @@ config PCI_HISI
126 135
127config PCIE_QCOM 136config PCIE_QCOM
128 bool "Qualcomm PCIe controller" 137 bool "Qualcomm PCIe controller"
129 depends on PCI 138 depends on OF && (ARCH_QCOM || COMPILE_TEST)
130 depends on ARCH_QCOM && OF
131 depends on PCI_MSI_IRQ_DOMAIN 139 depends on PCI_MSI_IRQ_DOMAIN
132 select PCIEPORTBUS
133 select PCIE_DW_HOST 140 select PCIE_DW_HOST
134 help 141 help
135 Say Y here to enable PCIe controller support on Qualcomm SoCs. The 142 Say Y here to enable PCIe controller support on Qualcomm SoCs. The
@@ -138,10 +145,8 @@ config PCIE_QCOM
138 145
139config PCIE_ARMADA_8K 146config PCIE_ARMADA_8K
140 bool "Marvell Armada-8K PCIe controller" 147 bool "Marvell Armada-8K PCIe controller"
141 depends on PCI 148 depends on ARCH_MVEBU || COMPILE_TEST
142 depends on ARCH_MVEBU
143 depends on PCI_MSI_IRQ_DOMAIN 149 depends on PCI_MSI_IRQ_DOMAIN
144 select PCIEPORTBUS
145 select PCIE_DW_HOST 150 select PCIE_DW_HOST
146 help 151 help
147 Say Y here if you want to enable PCIe controller support on 152 Say Y here if you want to enable PCIe controller support on
@@ -154,9 +159,8 @@ config PCIE_ARTPEC6
154 159
155config PCIE_ARTPEC6_HOST 160config PCIE_ARTPEC6_HOST
156 bool "Axis ARTPEC-6 PCIe controller Host Mode" 161 bool "Axis ARTPEC-6 PCIe controller Host Mode"
157 depends on MACH_ARTPEC6 162 depends on MACH_ARTPEC6 || COMPILE_TEST
158 depends on PCI && PCI_MSI_IRQ_DOMAIN 163 depends on PCI_MSI_IRQ_DOMAIN
159 select PCIEPORTBUS
160 select PCIE_DW_HOST 164 select PCIE_DW_HOST
161 select PCIE_ARTPEC6 165 select PCIE_ARTPEC6
162 help 166 help
@@ -165,7 +169,7 @@ config PCIE_ARTPEC6_HOST
165 169
166config PCIE_ARTPEC6_EP 170config PCIE_ARTPEC6_EP
167 bool "Axis ARTPEC-6 PCIe controller Endpoint Mode" 171 bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
168 depends on MACH_ARTPEC6 172 depends on MACH_ARTPEC6 || COMPILE_TEST
169 depends on PCI_ENDPOINT 173 depends on PCI_ENDPOINT
170 select PCIE_DW_EP 174 select PCIE_DW_EP
171 select PCIE_ARTPEC6 175 select PCIE_ARTPEC6
@@ -174,11 +178,9 @@ config PCIE_ARTPEC6_EP
174 endpoint mode. This uses the DesignWare core. 178 endpoint mode. This uses the DesignWare core.
175 179
176config PCIE_KIRIN 180config PCIE_KIRIN
177 depends on OF && ARM64 181 depends on OF && (ARM64 || COMPILE_TEST)
178 bool "HiSilicon Kirin series SoCs PCIe controllers" 182 bool "HiSilicon Kirin series SoCs PCIe controllers"
179 depends on PCI_MSI_IRQ_DOMAIN 183 depends on PCI_MSI_IRQ_DOMAIN
180 depends on PCI
181 select PCIEPORTBUS
182 select PCIE_DW_HOST 184 select PCIE_DW_HOST
183 help 185 help
184 Say Y here if you want PCIe controller support 186 Say Y here if you want PCIe controller support
@@ -186,10 +188,8 @@ config PCIE_KIRIN
186 188
187config PCIE_HISI_STB 189config PCIE_HISI_STB
188 bool "HiSilicon STB SoCs PCIe controllers" 190 bool "HiSilicon STB SoCs PCIe controllers"
189 depends on ARCH_HISI 191 depends on ARCH_HISI || COMPILE_TEST
190 depends on PCI
191 depends on PCI_MSI_IRQ_DOMAIN 192 depends on PCI_MSI_IRQ_DOMAIN
192 select PCIEPORTBUS
193 select PCIE_DW_HOST 193 select PCIE_DW_HOST
194 help 194 help
195 Say Y here if you want PCIe controller support on HiSilicon STB SoCs 195 Say Y here if you want PCIe controller support on HiSilicon STB SoCs
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index ed8558d638e5..f688204e50c5 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -27,6 +27,7 @@
27#include <linux/mfd/syscon.h> 27#include <linux/mfd/syscon.h>
28#include <linux/regmap.h> 28#include <linux/regmap.h>
29 29
30#include "../pci.h"
30#include "pcie-designware.h" 31#include "pcie-designware.h"
31 32
32/* PCIe controller wrapper DRA7XX configuration registers */ 33/* PCIe controller wrapper DRA7XX configuration registers */
@@ -406,14 +407,14 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
406 ep->ops = &pcie_ep_ops; 407 ep->ops = &pcie_ep_ops;
407 408
408 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); 409 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
409 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); 410 pci->dbi_base = devm_ioremap_resource(dev, res);
410 if (!pci->dbi_base) 411 if (IS_ERR(pci->dbi_base))
411 return -ENOMEM; 412 return PTR_ERR(pci->dbi_base);
412 413
413 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); 414 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
414 pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res)); 415 pci->dbi_base2 = devm_ioremap_resource(dev, res);
415 if (!pci->dbi_base2) 416 if (IS_ERR(pci->dbi_base2))
416 return -ENOMEM; 417 return PTR_ERR(pci->dbi_base2);
417 418
418 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); 419 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
419 if (!res) 420 if (!res)
@@ -459,9 +460,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
459 return ret; 460 return ret;
460 461
461 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); 462 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
462 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); 463 pci->dbi_base = devm_ioremap_resource(dev, res);
463 if (!pci->dbi_base) 464 if (IS_ERR(pci->dbi_base))
464 return -ENOMEM; 465 return PTR_ERR(pci->dbi_base);
465 466
466 pp->ops = &dra7xx_pcie_host_ops; 467 pp->ops = &dra7xx_pcie_host_ops;
467 468
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 4818ef875f8a..80f604602783 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -338,7 +338,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
338 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, 338 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
339 IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); 339 IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
340 break; 340 break;
341 case IMX6QP: /* FALLTHROUGH */ 341 case IMX6QP: /* FALLTHROUGH */
342 case IMX6Q: 342 case IMX6Q:
343 /* power up core phy and enable ref clock */ 343 /* power up core phy and enable ref clock */
344 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 344 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index d55ae0716adf..3722a5f31e5e 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -89,7 +89,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
89 dw_pcie_setup_rc(pp); 89 dw_pcie_setup_rc(pp);
90 90
91 if (dw_pcie_link_up(pci)) { 91 if (dw_pcie_link_up(pci)) {
92 dev_err(dev, "Link already up\n"); 92 dev_info(dev, "Link already up\n");
93 return 0; 93 return 0;
94 } 94 }
95 95
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index b587352f8b9f..072fd7ecc29f 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -28,6 +28,7 @@
28struct armada8k_pcie { 28struct armada8k_pcie {
29 struct dw_pcie *pci; 29 struct dw_pcie *pci;
30 struct clk *clk; 30 struct clk *clk;
31 struct clk *clk_reg;
31}; 32};
32 33
33#define PCIE_VENDOR_REGS_OFFSET 0x8000 34#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -229,26 +230,38 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
229 if (ret) 230 if (ret)
230 return ret; 231 return ret;
231 232
233 pcie->clk_reg = devm_clk_get(dev, "reg");
234 if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
235 ret = -EPROBE_DEFER;
236 goto fail;
237 }
238 if (!IS_ERR(pcie->clk_reg)) {
239 ret = clk_prepare_enable(pcie->clk_reg);
240 if (ret)
241 goto fail_clkreg;
242 }
243
232 /* Get the dw-pcie unit configuration/control registers base. */ 244 /* Get the dw-pcie unit configuration/control registers base. */
233 base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); 245 base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
234 pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); 246 pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
235 if (IS_ERR(pci->dbi_base)) { 247 if (IS_ERR(pci->dbi_base)) {
236 dev_err(dev, "couldn't remap regs base %p\n", base); 248 dev_err(dev, "couldn't remap regs base %p\n", base);
237 ret = PTR_ERR(pci->dbi_base); 249 ret = PTR_ERR(pci->dbi_base);
238 goto fail; 250 goto fail_clkreg;
239 } 251 }
240 252
241 platform_set_drvdata(pdev, pcie); 253 platform_set_drvdata(pdev, pcie);
242 254
243 ret = armada8k_add_pcie_port(pcie, pdev); 255 ret = armada8k_add_pcie_port(pcie, pdev);
244 if (ret) 256 if (ret)
245 goto fail; 257 goto fail_clkreg;
246 258
247 return 0; 259 return 0;
248 260
261fail_clkreg:
262 clk_disable_unprepare(pcie->clk_reg);
249fail: 263fail:
250 if (!IS_ERR(pcie->clk)) 264 clk_disable_unprepare(pcie->clk);
251 clk_disable_unprepare(pcie->clk);
252 265
253 return ret; 266 return ret;
254} 267}
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index e66cede2b5b7..321b56cfd5d0 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -463,9 +463,9 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
463 ep->ops = &pcie_ep_ops; 463 ep->ops = &pcie_ep_ops;
464 464
465 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); 465 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
466 pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res)); 466 pci->dbi_base2 = devm_ioremap_resource(dev, res);
467 if (!pci->dbi_base2) 467 if (IS_ERR(pci->dbi_base2))
468 return -ENOMEM; 468 return PTR_ERR(pci->dbi_base2);
469 469
470 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); 470 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
471 if (!res) 471 if (!res)
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index f07678bf7cfc..1eec4415a77f 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -75,7 +75,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
75 75
76 free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows); 76 free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
77 if (free_win >= ep->num_ib_windows) { 77 if (free_win >= ep->num_ib_windows) {
78 dev_err(pci->dev, "no free inbound window\n"); 78 dev_err(pci->dev, "No free inbound window\n");
79 return -EINVAL; 79 return -EINVAL;
80 } 80 }
81 81
@@ -100,7 +100,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
100 100
101 free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); 101 free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
102 if (free_win >= ep->num_ob_windows) { 102 if (free_win >= ep->num_ob_windows) {
103 dev_err(pci->dev, "no free outbound window\n"); 103 dev_err(pci->dev, "No free outbound window\n");
104 return -EINVAL; 104 return -EINVAL;
105 } 105 }
106 106
@@ -204,7 +204,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
204 204
205 ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); 205 ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
206 if (ret) { 206 if (ret) {
207 dev_err(pci->dev, "failed to enable address\n"); 207 dev_err(pci->dev, "Failed to enable address\n");
208 return ret; 208 return ret;
209 } 209 }
210 210
@@ -348,21 +348,21 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
348 348
349 ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); 349 ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
350 if (ret < 0) { 350 if (ret < 0) {
351 dev_err(dev, "unable to read *num-ib-windows* property\n"); 351 dev_err(dev, "Unable to read *num-ib-windows* property\n");
352 return ret; 352 return ret;
353 } 353 }
354 if (ep->num_ib_windows > MAX_IATU_IN) { 354 if (ep->num_ib_windows > MAX_IATU_IN) {
355 dev_err(dev, "invalid *num-ib-windows*\n"); 355 dev_err(dev, "Invalid *num-ib-windows*\n");
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
358 358
359 ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); 359 ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
360 if (ret < 0) { 360 if (ret < 0) {
361 dev_err(dev, "unable to read *num-ob-windows* property\n"); 361 dev_err(dev, "Unable to read *num-ob-windows* property\n");
362 return ret; 362 return ret;
363 } 363 }
364 if (ep->num_ob_windows > MAX_IATU_OUT) { 364 if (ep->num_ob_windows > MAX_IATU_OUT) {
365 dev_err(dev, "invalid *num-ob-windows*\n"); 365 dev_err(dev, "Invalid *num-ob-windows*\n");
366 return -EINVAL; 366 return -EINVAL;
367 } 367 }
368 368
@@ -389,7 +389,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
389 389
390 epc = devm_pci_epc_create(dev, &epc_ops); 390 epc = devm_pci_epc_create(dev, &epc_ops);
391 if (IS_ERR(epc)) { 391 if (IS_ERR(epc)) {
392 dev_err(dev, "failed to create epc device\n"); 392 dev_err(dev, "Failed to create epc device\n");
393 return PTR_ERR(epc); 393 return PTR_ERR(epc);
394 } 394 }
395 395
@@ -411,6 +411,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
411 return -ENOMEM; 411 return -ENOMEM;
412 } 412 }
413 413
414 epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
415 EPC_FEATURE_SET_BAR(epc->features, BAR_0);
416
414 ep->epc = epc; 417 ep->epc = epc;
415 epc_set_drvdata(epc, ep); 418 epc_set_drvdata(epc, ep);
416 dw_pcie_setup(pci); 419 dw_pcie_setup(pci);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 6c409079d514..cba1432e395d 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -15,6 +15,7 @@
15#include <linux/pci_regs.h> 15#include <linux/pci_regs.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17 17
18#include "../pci.h"
18#include "pcie-designware.h" 19#include "pcie-designware.h"
19 20
20static struct pci_ops dw_pcie_ops; 21static struct pci_ops dw_pcie_ops;
@@ -83,18 +84,23 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
83 num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; 84 num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
84 85
85 for (i = 0; i < num_ctrls; i++) { 86 for (i = 0; i < num_ctrls; i++) {
86 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, 87 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
87 &val); 88 (i * MSI_REG_CTRL_BLOCK_SIZE),
89 4, &val);
88 if (!val) 90 if (!val)
89 continue; 91 continue;
90 92
91 ret = IRQ_HANDLED; 93 ret = IRQ_HANDLED;
92 pos = 0; 94 pos = 0;
93 while ((pos = find_next_bit((unsigned long *) &val, 32, 95 while ((pos = find_next_bit((unsigned long *) &val,
94 pos)) != 32) { 96 MAX_MSI_IRQS_PER_CTRL,
95 irq = irq_find_mapping(pp->irq_domain, i * 32 + pos); 97 pos)) != MAX_MSI_IRQS_PER_CTRL) {
98 irq = irq_find_mapping(pp->irq_domain,
99 (i * MAX_MSI_IRQS_PER_CTRL) +
100 pos);
96 generic_handle_irq(irq); 101 generic_handle_irq(irq);
97 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 102 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
103 (i * MSI_REG_CTRL_BLOCK_SIZE),
98 4, 1 << pos); 104 4, 1 << pos);
99 pos++; 105 pos++;
100 } 106 }
@@ -157,9 +163,9 @@ static void dw_pci_bottom_mask(struct irq_data *data)
157 if (pp->ops->msi_clear_irq) { 163 if (pp->ops->msi_clear_irq) {
158 pp->ops->msi_clear_irq(pp, data->hwirq); 164 pp->ops->msi_clear_irq(pp, data->hwirq);
159 } else { 165 } else {
160 ctrl = data->hwirq / 32; 166 ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
161 res = ctrl * 12; 167 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
162 bit = data->hwirq % 32; 168 bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
163 169
164 pp->irq_status[ctrl] &= ~(1 << bit); 170 pp->irq_status[ctrl] &= ~(1 << bit);
165 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, 171 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -180,9 +186,9 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
180 if (pp->ops->msi_set_irq) { 186 if (pp->ops->msi_set_irq) {
181 pp->ops->msi_set_irq(pp, data->hwirq); 187 pp->ops->msi_set_irq(pp, data->hwirq);
182 } else { 188 } else {
183 ctrl = data->hwirq / 32; 189 ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
184 res = ctrl * 12; 190 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
185 bit = data->hwirq % 32; 191 bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
186 192
187 pp->irq_status[ctrl] |= 1 << bit; 193 pp->irq_status[ctrl] |= 1 << bit;
188 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, 194 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -248,8 +254,10 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
248 unsigned long flags; 254 unsigned long flags;
249 255
250 raw_spin_lock_irqsave(&pp->lock, flags); 256 raw_spin_lock_irqsave(&pp->lock, flags);
257
251 bitmap_release_region(pp->msi_irq_in_use, data->hwirq, 258 bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
252 order_base_2(nr_irqs)); 259 order_base_2(nr_irqs));
260
253 raw_spin_unlock_irqrestore(&pp->lock, flags); 261 raw_spin_unlock_irqrestore(&pp->lock, flags);
254} 262}
255 263
@@ -266,7 +274,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
266 pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, 274 pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
267 &dw_pcie_msi_domain_ops, pp); 275 &dw_pcie_msi_domain_ops, pp);
268 if (!pp->irq_domain) { 276 if (!pp->irq_domain) {
269 dev_err(pci->dev, "failed to create IRQ domain\n"); 277 dev_err(pci->dev, "Failed to create IRQ domain\n");
270 return -ENOMEM; 278 return -ENOMEM;
271 } 279 }
272 280
@@ -274,7 +282,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
274 &dw_pcie_msi_domain_info, 282 &dw_pcie_msi_domain_info,
275 pp->irq_domain); 283 pp->irq_domain);
276 if (!pp->msi_domain) { 284 if (!pp->msi_domain) {
277 dev_err(pci->dev, "failed to create MSI domain\n"); 285 dev_err(pci->dev, "Failed to create MSI domain\n");
278 irq_domain_remove(pp->irq_domain); 286 irq_domain_remove(pp->irq_domain);
279 return -ENOMEM; 287 return -ENOMEM;
280 } 288 }
@@ -301,13 +309,13 @@ void dw_pcie_msi_init(struct pcie_port *pp)
301 page = alloc_page(GFP_KERNEL); 309 page = alloc_page(GFP_KERNEL);
302 pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 310 pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
303 if (dma_mapping_error(dev, pp->msi_data)) { 311 if (dma_mapping_error(dev, pp->msi_data)) {
304 dev_err(dev, "failed to map MSI data\n"); 312 dev_err(dev, "Failed to map MSI data\n");
305 __free_page(page); 313 __free_page(page);
306 return; 314 return;
307 } 315 }
308 msi_target = (u64)pp->msi_data; 316 msi_target = (u64)pp->msi_data;
309 317
310 /* program the msi_data */ 318 /* Program the msi_data */
311 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, 319 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
312 lower_32_bits(msi_target)); 320 lower_32_bits(msi_target));
313 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 321 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
@@ -330,19 +338,19 @@ int dw_pcie_host_init(struct pcie_port *pp)
330 338
331 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); 339 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
332 if (cfg_res) { 340 if (cfg_res) {
333 pp->cfg0_size = resource_size(cfg_res) / 2; 341 pp->cfg0_size = resource_size(cfg_res) >> 1;
334 pp->cfg1_size = resource_size(cfg_res) / 2; 342 pp->cfg1_size = resource_size(cfg_res) >> 1;
335 pp->cfg0_base = cfg_res->start; 343 pp->cfg0_base = cfg_res->start;
336 pp->cfg1_base = cfg_res->start + pp->cfg0_size; 344 pp->cfg1_base = cfg_res->start + pp->cfg0_size;
337 } else if (!pp->va_cfg0_base) { 345 } else if (!pp->va_cfg0_base) {
338 dev_err(dev, "missing *config* reg space\n"); 346 dev_err(dev, "Missing *config* reg space\n");
339 } 347 }
340 348
341 bridge = pci_alloc_host_bridge(0); 349 bridge = pci_alloc_host_bridge(0);
342 if (!bridge) 350 if (!bridge)
343 return -ENOMEM; 351 return -ENOMEM;
344 352
345 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, 353 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
346 &bridge->windows, &pp->io_base); 354 &bridge->windows, &pp->io_base);
347 if (ret) 355 if (ret)
348 return ret; 356 return ret;
@@ -357,7 +365,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
357 case IORESOURCE_IO: 365 case IORESOURCE_IO:
358 ret = pci_remap_iospace(win->res, pp->io_base); 366 ret = pci_remap_iospace(win->res, pp->io_base);
359 if (ret) { 367 if (ret) {
360 dev_warn(dev, "error %d: failed to map resource %pR\n", 368 dev_warn(dev, "Error %d: failed to map resource %pR\n",
361 ret, win->res); 369 ret, win->res);
362 resource_list_destroy_entry(win); 370 resource_list_destroy_entry(win);
363 } else { 371 } else {
@@ -375,8 +383,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
375 break; 383 break;
376 case 0: 384 case 0:
377 pp->cfg = win->res; 385 pp->cfg = win->res;
378 pp->cfg0_size = resource_size(pp->cfg) / 2; 386 pp->cfg0_size = resource_size(pp->cfg) >> 1;
379 pp->cfg1_size = resource_size(pp->cfg) / 2; 387 pp->cfg1_size = resource_size(pp->cfg) >> 1;
380 pp->cfg0_base = pp->cfg->start; 388 pp->cfg0_base = pp->cfg->start;
381 pp->cfg1_base = pp->cfg->start + pp->cfg0_size; 389 pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
382 break; 390 break;
@@ -391,7 +399,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
391 pp->cfg->start, 399 pp->cfg->start,
392 resource_size(pp->cfg)); 400 resource_size(pp->cfg));
393 if (!pci->dbi_base) { 401 if (!pci->dbi_base) {
394 dev_err(dev, "error with ioremap\n"); 402 dev_err(dev, "Error with ioremap\n");
395 ret = -ENOMEM; 403 ret = -ENOMEM;
396 goto error; 404 goto error;
397 } 405 }
@@ -403,7 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
403 pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, 411 pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
404 pp->cfg0_base, pp->cfg0_size); 412 pp->cfg0_base, pp->cfg0_size);
405 if (!pp->va_cfg0_base) { 413 if (!pp->va_cfg0_base) {
406 dev_err(dev, "error with ioremap in function\n"); 414 dev_err(dev, "Error with ioremap in function\n");
407 ret = -ENOMEM; 415 ret = -ENOMEM;
408 goto error; 416 goto error;
409 } 417 }
@@ -414,7 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
414 pp->cfg1_base, 422 pp->cfg1_base,
415 pp->cfg1_size); 423 pp->cfg1_size);
416 if (!pp->va_cfg1_base) { 424 if (!pp->va_cfg1_base) {
417 dev_err(dev, "error with ioremap\n"); 425 dev_err(dev, "Error with ioremap\n");
418 ret = -ENOMEM; 426 ret = -ENOMEM;
419 goto error; 427 goto error;
420 } 428 }
@@ -586,7 +594,7 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
586 return 0; 594 return 0;
587 } 595 }
588 596
589 /* access only one slot on each root port */ 597 /* Access only one slot on each root port */
590 if (bus->number == pp->root_bus_nr && dev > 0) 598 if (bus->number == pp->root_bus_nr && dev > 0)
591 return 0; 599 return 0;
592 600
@@ -650,13 +658,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
650 658
651 /* Initialize IRQ Status array */ 659 /* Initialize IRQ Status array */
652 for (ctrl = 0; ctrl < num_ctrls; ctrl++) 660 for (ctrl = 0; ctrl < num_ctrls; ctrl++)
653 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4, 661 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
654 &pp->irq_status[ctrl]); 662 (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
655 /* setup RC BARs */ 663 4, &pp->irq_status[ctrl]);
664
665 /* Setup RC BARs */
656 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); 666 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
657 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); 667 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
658 668
659 /* setup interrupt pins */ 669 /* Setup interrupt pins */
660 dw_pcie_dbi_ro_wr_en(pci); 670 dw_pcie_dbi_ro_wr_en(pci);
661 val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); 671 val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
662 val &= 0xffff00ff; 672 val &= 0xffff00ff;
@@ -664,13 +674,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
664 dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); 674 dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
665 dw_pcie_dbi_ro_wr_dis(pci); 675 dw_pcie_dbi_ro_wr_dis(pci);
666 676
667 /* setup bus numbers */ 677 /* Setup bus numbers */
668 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); 678 val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
669 val &= 0xff000000; 679 val &= 0xff000000;
670 val |= 0x00ff0100; 680 val |= 0x00ff0100;
671 dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); 681 dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
672 682
673 /* setup command register */ 683 /* Setup command register */
674 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 684 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
675 val &= 0xffff0000; 685 val &= 0xffff0000;
676 val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 686 val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -683,7 +693,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
683 * we should not program the ATU here. 693 * we should not program the ATU here.
684 */ 694 */
685 if (!pp->ops->rd_other_conf) { 695 if (!pp->ops->rd_other_conf) {
686 /* get iATU unroll support */ 696 /* Get iATU unroll support */
687 pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); 697 pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
688 dev_dbg(pci->dev, "iATU unroll: %s\n", 698 dev_dbg(pci->dev, "iATU unroll: %s\n",
689 pci->iatu_unroll_enabled ? "enabled" : "disabled"); 699 pci->iatu_unroll_enabled ? "enabled" : "disabled");
@@ -701,7 +711,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
701 711
702 /* Enable write permission for the DBI read-only register */ 712 /* Enable write permission for the DBI read-only register */
703 dw_pcie_dbi_ro_wr_en(pci); 713 dw_pcie_dbi_ro_wr_en(pci);
704 /* program correct class for RC */ 714 /* Program correct class for RC */
705 dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); 715 dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
706 /* Better disable write permission right after the update */ 716 /* Better disable write permission right after the update */
707 dw_pcie_dbi_ro_wr_dis(pci); 717 dw_pcie_dbi_ro_wr_dis(pci);
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 5416aa8a07a5..5937fed4c938 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -12,19 +12,29 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/of_device.h>
15#include <linux/of_gpio.h> 16#include <linux/of_gpio.h>
16#include <linux/pci.h> 17#include <linux/pci.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/resource.h> 19#include <linux/resource.h>
19#include <linux/signal.h> 20#include <linux/signal.h>
20#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/regmap.h>
21 23
22#include "pcie-designware.h" 24#include "pcie-designware.h"
23 25
24struct dw_plat_pcie { 26struct dw_plat_pcie {
25 struct dw_pcie *pci; 27 struct dw_pcie *pci;
28 struct regmap *regmap;
29 enum dw_pcie_device_mode mode;
26}; 30};
27 31
32struct dw_plat_pcie_of_data {
33 enum dw_pcie_device_mode mode;
34};
35
36static const struct of_device_id dw_plat_pcie_of_match[];
37
28static int dw_plat_pcie_host_init(struct pcie_port *pp) 38static int dw_plat_pcie_host_init(struct pcie_port *pp)
29{ 39{
30 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 40 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -38,13 +48,63 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)
38 return 0; 48 return 0;
39} 49}
40 50
51static void dw_plat_set_num_vectors(struct pcie_port *pp)
52{
53 pp->num_vectors = MAX_MSI_IRQS;
54}
55
41static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { 56static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
42 .host_init = dw_plat_pcie_host_init, 57 .host_init = dw_plat_pcie_host_init,
58 .set_num_vectors = dw_plat_set_num_vectors,
59};
60
61static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
62{
63 return 0;
64}
65
66static const struct dw_pcie_ops dw_pcie_ops = {
67 .start_link = dw_plat_pcie_establish_link,
43}; 68};
44 69
45static int dw_plat_add_pcie_port(struct pcie_port *pp, 70static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
71{
72 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
73 enum pci_barno bar;
74
75 for (bar = BAR_0; bar <= BAR_5; bar++)
76 dw_pcie_ep_reset_bar(pci, bar);
77}
78
79static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
80 enum pci_epc_irq_type type,
81 u8 interrupt_num)
82{
83 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
84
85 switch (type) {
86 case PCI_EPC_IRQ_LEGACY:
87 dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
88 return -EINVAL;
89 case PCI_EPC_IRQ_MSI:
90 return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
91 default:
92 dev_err(pci->dev, "UNKNOWN IRQ type\n");
93 }
94
95 return 0;
96}
97
98static struct dw_pcie_ep_ops pcie_ep_ops = {
99 .ep_init = dw_plat_pcie_ep_init,
100 .raise_irq = dw_plat_pcie_ep_raise_irq,
101};
102
103static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
46 struct platform_device *pdev) 104 struct platform_device *pdev)
47{ 105{
106 struct dw_pcie *pci = dw_plat_pcie->pci;
107 struct pcie_port *pp = &pci->pp;
48 struct device *dev = &pdev->dev; 108 struct device *dev = &pdev->dev;
49 int ret; 109 int ret;
50 110
@@ -63,15 +123,44 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
63 123
64 ret = dw_pcie_host_init(pp); 124 ret = dw_pcie_host_init(pp);
65 if (ret) { 125 if (ret) {
66 dev_err(dev, "failed to initialize host\n"); 126 dev_err(dev, "Failed to initialize host\n");
67 return ret; 127 return ret;
68 } 128 }
69 129
70 return 0; 130 return 0;
71} 131}
72 132
73static const struct dw_pcie_ops dw_pcie_ops = { 133static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
74}; 134 struct platform_device *pdev)
135{
136 int ret;
137 struct dw_pcie_ep *ep;
138 struct resource *res;
139 struct device *dev = &pdev->dev;
140 struct dw_pcie *pci = dw_plat_pcie->pci;
141
142 ep = &pci->ep;
143 ep->ops = &pcie_ep_ops;
144
145 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
146 pci->dbi_base2 = devm_ioremap_resource(dev, res);
147 if (IS_ERR(pci->dbi_base2))
148 return PTR_ERR(pci->dbi_base2);
149
150 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
151 if (!res)
152 return -EINVAL;
153
154 ep->phys_base = res->start;
155 ep->addr_size = resource_size(res);
156
157 ret = dw_pcie_ep_init(ep);
158 if (ret) {
159 dev_err(dev, "Failed to initialize endpoint\n");
160 return ret;
161 }
162 return 0;
163}
75 164
76static int dw_plat_pcie_probe(struct platform_device *pdev) 165static int dw_plat_pcie_probe(struct platform_device *pdev)
77{ 166{
@@ -80,6 +169,16 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
80 struct dw_pcie *pci; 169 struct dw_pcie *pci;
81 struct resource *res; /* Resource from DT */ 170 struct resource *res; /* Resource from DT */
82 int ret; 171 int ret;
172 const struct of_device_id *match;
173 const struct dw_plat_pcie_of_data *data;
174 enum dw_pcie_device_mode mode;
175
176 match = of_match_device(dw_plat_pcie_of_match, dev);
177 if (!match)
178 return -EINVAL;
179
180 data = (struct dw_plat_pcie_of_data *)match->data;
181 mode = (enum dw_pcie_device_mode)data->mode;
83 182
84 dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); 183 dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
85 if (!dw_plat_pcie) 184 if (!dw_plat_pcie)
@@ -93,23 +192,59 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
93 pci->ops = &dw_pcie_ops; 192 pci->ops = &dw_pcie_ops;
94 193
95 dw_plat_pcie->pci = pci; 194 dw_plat_pcie->pci = pci;
195 dw_plat_pcie->mode = mode;
196
197 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
198 if (!res)
199 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
96 200
97 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
98 pci->dbi_base = devm_ioremap_resource(dev, res); 201 pci->dbi_base = devm_ioremap_resource(dev, res);
99 if (IS_ERR(pci->dbi_base)) 202 if (IS_ERR(pci->dbi_base))
100 return PTR_ERR(pci->dbi_base); 203 return PTR_ERR(pci->dbi_base);
101 204
102 platform_set_drvdata(pdev, dw_plat_pcie); 205 platform_set_drvdata(pdev, dw_plat_pcie);
103 206
104 ret = dw_plat_add_pcie_port(&pci->pp, pdev); 207 switch (dw_plat_pcie->mode) {
105 if (ret < 0) 208 case DW_PCIE_RC_TYPE:
106 return ret; 209 if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
210 return -ENODEV;
211
212 ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
213 if (ret < 0)
214 return ret;
215 break;
216 case DW_PCIE_EP_TYPE:
217 if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
218 return -ENODEV;
219
220 ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
221 if (ret < 0)
222 return ret;
223 break;
224 default:
225 dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
226 }
107 227
108 return 0; 228 return 0;
109} 229}
110 230
231static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
232 .mode = DW_PCIE_RC_TYPE,
233};
234
235static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
236 .mode = DW_PCIE_EP_TYPE,
237};
238
111static const struct of_device_id dw_plat_pcie_of_match[] = { 239static const struct of_device_id dw_plat_pcie_of_match[] = {
112 { .compatible = "snps,dw-pcie", }, 240 {
241 .compatible = "snps,dw-pcie",
242 .data = &dw_plat_pcie_rc_of_data,
243 },
244 {
245 .compatible = "snps,dw-pcie-ep",
246 .data = &dw_plat_pcie_ep_of_data,
247 },
113 {}, 248 {},
114}; 249};
115 250
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 1b7282e5b494..778c4f76a884 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -69,7 +69,7 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
69 69
70 ret = dw_pcie_read(base + reg, size, &val); 70 ret = dw_pcie_read(base + reg, size, &val);
71 if (ret) 71 if (ret)
72 dev_err(pci->dev, "read DBI address failed\n"); 72 dev_err(pci->dev, "Read DBI address failed\n");
73 73
74 return val; 74 return val;
75} 75}
@@ -86,7 +86,7 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
86 86
87 ret = dw_pcie_write(base + reg, size, val); 87 ret = dw_pcie_write(base + reg, size, val);
88 if (ret) 88 if (ret)
89 dev_err(pci->dev, "write DBI address failed\n"); 89 dev_err(pci->dev, "Write DBI address failed\n");
90} 90}
91 91
92static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) 92static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -137,7 +137,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
137 137
138 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 138 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
139 } 139 }
140 dev_err(pci->dev, "outbound iATU is not being enabled\n"); 140 dev_err(pci->dev, "Outbound iATU is not being enabled\n");
141} 141}
142 142
143void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, 143void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
@@ -180,7 +180,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
180 180
181 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 181 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
182 } 182 }
183 dev_err(pci->dev, "outbound iATU is not being enabled\n"); 183 dev_err(pci->dev, "Outbound iATU is not being enabled\n");
184} 184}
185 185
186static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) 186static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -238,7 +238,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
238 238
239 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 239 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
240 } 240 }
241 dev_err(pci->dev, "inbound iATU is not being enabled\n"); 241 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
242 242
243 return -EBUSY; 243 return -EBUSY;
244} 244}
@@ -284,7 +284,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
284 284
285 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 285 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
286 } 286 }
287 dev_err(pci->dev, "inbound iATU is not being enabled\n"); 287 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
288 288
289 return -EBUSY; 289 return -EBUSY;
290} 290}
@@ -313,16 +313,16 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
313{ 313{
314 int retries; 314 int retries;
315 315
316 /* check if the link is up or not */ 316 /* Check if the link is up or not */
317 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { 317 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
318 if (dw_pcie_link_up(pci)) { 318 if (dw_pcie_link_up(pci)) {
319 dev_info(pci->dev, "link up\n"); 319 dev_info(pci->dev, "Link up\n");
320 return 0; 320 return 0;
321 } 321 }
322 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); 322 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
323 } 323 }
324 324
325 dev_err(pci->dev, "phy link never came up\n"); 325 dev_err(pci->dev, "Phy link never came up\n");
326 326
327 return -ETIMEDOUT; 327 return -ETIMEDOUT;
328} 328}
@@ -351,7 +351,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
351 if (ret) 351 if (ret)
352 lanes = 0; 352 lanes = 0;
353 353
354 /* set the number of lanes */ 354 /* Set the number of lanes */
355 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); 355 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
356 val &= ~PORT_LINK_MODE_MASK; 356 val &= ~PORT_LINK_MODE_MASK;
357 switch (lanes) { 357 switch (lanes) {
@@ -373,7 +373,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
373 } 373 }
374 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); 374 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
375 375
376 /* set link width speed control register */ 376 /* Set link width speed control register */
377 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 377 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
378 val &= ~PORT_LOGIC_LINK_WIDTH_MASK; 378 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
379 switch (lanes) { 379 switch (lanes) {
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index fe811dbc12cf..bee4e2535a61 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -110,6 +110,7 @@
110#define MAX_MSI_IRQS 256 110#define MAX_MSI_IRQS 256
111#define MAX_MSI_IRQS_PER_CTRL 32 111#define MAX_MSI_IRQS_PER_CTRL 32
112#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) 112#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
113#define MSI_REG_CTRL_BLOCK_SIZE 12
113#define MSI_DEF_NUM_VECTORS 32 114#define MSI_DEF_NUM_VECTORS 32
114 115
115/* Maximum number of inbound/outbound iATUs */ 116/* Maximum number of inbound/outbound iATUs */
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5897af7d3355..a1d0198081a6 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -10,7 +10,7 @@
10 10
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/gpio.h> 13#include <linux/gpio/consumer.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/iopoll.h> 16#include <linux/iopoll.h>
@@ -19,6 +19,7 @@
19#include <linux/of_device.h> 19#include <linux/of_device.h>
20#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/pm_runtime.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/phy/phy.h> 24#include <linux/phy/phy.h>
24#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
@@ -869,7 +870,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
869 870
870 /* enable PCIe clocks and resets */ 871 /* enable PCIe clocks and resets */
871 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 872 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
872 val &= !BIT(0); 873 val &= ~BIT(0);
873 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 874 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
874 875
875 /* change DBI base address */ 876 /* change DBI base address */
@@ -1088,6 +1089,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
1088 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1089 struct qcom_pcie *pcie = to_qcom_pcie(pci);
1089 int ret; 1090 int ret;
1090 1091
1092 pm_runtime_get_sync(pci->dev);
1091 qcom_ep_reset_assert(pcie); 1093 qcom_ep_reset_assert(pcie);
1092 1094
1093 ret = pcie->ops->init(pcie); 1095 ret = pcie->ops->init(pcie);
@@ -1124,6 +1126,7 @@ err_disable_phy:
1124 phy_power_off(pcie->phy); 1126 phy_power_off(pcie->phy);
1125err_deinit: 1127err_deinit:
1126 pcie->ops->deinit(pcie); 1128 pcie->ops->deinit(pcie);
1129 pm_runtime_put(pci->dev);
1127 1130
1128 return ret; 1131 return ret;
1129} 1132}
@@ -1212,6 +1215,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
1212 if (!pci) 1215 if (!pci)
1213 return -ENOMEM; 1216 return -ENOMEM;
1214 1217
1218 pm_runtime_enable(dev);
1215 pci->dev = dev; 1219 pci->dev = dev;
1216 pci->ops = &dw_pcie_ops; 1220 pci->ops = &dw_pcie_ops;
1217 pp = &pci->pp; 1221 pp = &pci->pp;
@@ -1257,14 +1261,17 @@ static int qcom_pcie_probe(struct platform_device *pdev)
1257 } 1261 }
1258 1262
1259 ret = phy_init(pcie->phy); 1263 ret = phy_init(pcie->phy);
1260 if (ret) 1264 if (ret) {
1265 pm_runtime_disable(&pdev->dev);
1261 return ret; 1266 return ret;
1267 }
1262 1268
1263 platform_set_drvdata(pdev, pcie); 1269 platform_set_drvdata(pdev, pcie);
1264 1270
1265 ret = dw_pcie_host_init(pp); 1271 ret = dw_pcie_host_init(pp);
1266 if (ret) { 1272 if (ret) {
1267 dev_err(dev, "cannot initialize host\n"); 1273 dev_err(dev, "cannot initialize host\n");
1274 pm_runtime_disable(&pdev->dev);
1268 return ret; 1275 return ret;
1269 } 1276 }
1270 1277
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 7cef85124325..63ed706445b9 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -87,7 +87,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
87 87
88 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); 88 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
89 if (!src_addr) { 89 if (!src_addr) {
90 dev_err(dev, "failed to allocate source address\n"); 90 dev_err(dev, "Failed to allocate source address\n");
91 reg->status = STATUS_SRC_ADDR_INVALID; 91 reg->status = STATUS_SRC_ADDR_INVALID;
92 ret = -ENOMEM; 92 ret = -ENOMEM;
93 goto err; 93 goto err;
@@ -96,14 +96,14 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
96 ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr, 96 ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
97 reg->size); 97 reg->size);
98 if (ret) { 98 if (ret) {
99 dev_err(dev, "failed to map source address\n"); 99 dev_err(dev, "Failed to map source address\n");
100 reg->status = STATUS_SRC_ADDR_INVALID; 100 reg->status = STATUS_SRC_ADDR_INVALID;
101 goto err_src_addr; 101 goto err_src_addr;
102 } 102 }
103 103
104 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); 104 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
105 if (!dst_addr) { 105 if (!dst_addr) {
106 dev_err(dev, "failed to allocate destination address\n"); 106 dev_err(dev, "Failed to allocate destination address\n");
107 reg->status = STATUS_DST_ADDR_INVALID; 107 reg->status = STATUS_DST_ADDR_INVALID;
108 ret = -ENOMEM; 108 ret = -ENOMEM;
109 goto err_src_map_addr; 109 goto err_src_map_addr;
@@ -112,7 +112,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
112 ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr, 112 ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
113 reg->size); 113 reg->size);
114 if (ret) { 114 if (ret) {
115 dev_err(dev, "failed to map destination address\n"); 115 dev_err(dev, "Failed to map destination address\n");
116 reg->status = STATUS_DST_ADDR_INVALID; 116 reg->status = STATUS_DST_ADDR_INVALID;
117 goto err_dst_addr; 117 goto err_dst_addr;
118 } 118 }
@@ -149,7 +149,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
149 149
150 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 150 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
151 if (!src_addr) { 151 if (!src_addr) {
152 dev_err(dev, "failed to allocate address\n"); 152 dev_err(dev, "Failed to allocate address\n");
153 reg->status = STATUS_SRC_ADDR_INVALID; 153 reg->status = STATUS_SRC_ADDR_INVALID;
154 ret = -ENOMEM; 154 ret = -ENOMEM;
155 goto err; 155 goto err;
@@ -158,7 +158,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
158 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr, 158 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
159 reg->size); 159 reg->size);
160 if (ret) { 160 if (ret) {
161 dev_err(dev, "failed to map address\n"); 161 dev_err(dev, "Failed to map address\n");
162 reg->status = STATUS_SRC_ADDR_INVALID; 162 reg->status = STATUS_SRC_ADDR_INVALID;
163 goto err_addr; 163 goto err_addr;
164 } 164 }
@@ -201,7 +201,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
201 201
202 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 202 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
203 if (!dst_addr) { 203 if (!dst_addr) {
204 dev_err(dev, "failed to allocate address\n"); 204 dev_err(dev, "Failed to allocate address\n");
205 reg->status = STATUS_DST_ADDR_INVALID; 205 reg->status = STATUS_DST_ADDR_INVALID;
206 ret = -ENOMEM; 206 ret = -ENOMEM;
207 goto err; 207 goto err;
@@ -210,7 +210,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
210 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr, 210 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
211 reg->size); 211 reg->size);
212 if (ret) { 212 if (ret) {
213 dev_err(dev, "failed to map address\n"); 213 dev_err(dev, "Failed to map address\n");
214 reg->status = STATUS_DST_ADDR_INVALID; 214 reg->status = STATUS_DST_ADDR_INVALID;
215 goto err_addr; 215 goto err_addr;
216 } 216 }
@@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
230 * wait 1ms inorder for the write to complete. Without this delay L3 230 * wait 1ms inorder for the write to complete. Without this delay L3
231 * error in observed in the host system. 231 * error in observed in the host system.
232 */ 232 */
233 mdelay(1); 233 usleep_range(1000, 2000);
234 234
235 kfree(buf); 235 kfree(buf);
236 236
@@ -379,7 +379,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
379 ret = pci_epc_set_bar(epc, epf->func_no, epf_bar); 379 ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
380 if (ret) { 380 if (ret) {
381 pci_epf_free_space(epf, epf_test->reg[bar], bar); 381 pci_epf_free_space(epf, epf_test->reg[bar], bar);
382 dev_err(dev, "failed to set BAR%d\n", bar); 382 dev_err(dev, "Failed to set BAR%d\n", bar);
383 if (bar == test_reg_bar) 383 if (bar == test_reg_bar)
384 return ret; 384 return ret;
385 } 385 }
@@ -406,7 +406,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
406 base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), 406 base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
407 test_reg_bar); 407 test_reg_bar);
408 if (!base) { 408 if (!base) {
409 dev_err(dev, "failed to allocated register space\n"); 409 dev_err(dev, "Failed to allocated register space\n");
410 return -ENOMEM; 410 return -ENOMEM;
411 } 411 }
412 epf_test->reg[test_reg_bar] = base; 412 epf_test->reg[test_reg_bar] = base;
@@ -416,7 +416,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
416 continue; 416 continue;
417 base = pci_epf_alloc_space(epf, bar_size[bar], bar); 417 base = pci_epf_alloc_space(epf, bar_size[bar], bar);
418 if (!base) 418 if (!base)
419 dev_err(dev, "failed to allocate space for BAR%d\n", 419 dev_err(dev, "Failed to allocate space for BAR%d\n",
420 bar); 420 bar);
421 epf_test->reg[bar] = base; 421 epf_test->reg[bar] = base;
422 } 422 }
@@ -435,9 +435,16 @@ static int pci_epf_test_bind(struct pci_epf *epf)
435 if (WARN_ON_ONCE(!epc)) 435 if (WARN_ON_ONCE(!epc))
436 return -EINVAL; 436 return -EINVAL;
437 437
438 if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
439 epf_test->linkup_notifier = false;
440 else
441 epf_test->linkup_notifier = true;
442
443 epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
444
438 ret = pci_epc_write_header(epc, epf->func_no, header); 445 ret = pci_epc_write_header(epc, epf->func_no, header);
439 if (ret) { 446 if (ret) {
440 dev_err(dev, "configuration header write failed\n"); 447 dev_err(dev, "Configuration header write failed\n");
441 return ret; 448 return ret;
442 } 449 }
443 450
@@ -519,7 +526,7 @@ static int __init pci_epf_test_init(void)
519 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 526 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
520 ret = pci_epf_register_driver(&test_driver); 527 ret = pci_epf_register_driver(&test_driver);
521 if (ret) { 528 if (ret) {
522 pr_err("failed to register pci epf test driver --> %d\n", ret); 529 pr_err("Failed to register pci epf test driver --> %d\n", ret);
523 return ret; 530 return ret;
524 } 531 }
525 532
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 465b5f058b6d..523a8cab3bfb 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -15,6 +15,8 @@
15#include <linux/pci-epf.h> 15#include <linux/pci-epf.h>
16#include <linux/pci-ep-cfs.h> 16#include <linux/pci-ep-cfs.h>
17 17
18static DEFINE_MUTEX(pci_epf_mutex);
19
18static struct bus_type pci_epf_bus_type; 20static struct bus_type pci_epf_bus_type;
19static const struct device_type pci_epf_type; 21static const struct device_type pci_epf_type;
20 22
@@ -143,7 +145,13 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
143 */ 145 */
144void pci_epf_unregister_driver(struct pci_epf_driver *driver) 146void pci_epf_unregister_driver(struct pci_epf_driver *driver)
145{ 147{
146 pci_ep_cfs_remove_epf_group(driver->group); 148 struct config_group *group;
149
150 mutex_lock(&pci_epf_mutex);
151 list_for_each_entry(group, &driver->epf_group, group_entry)
152 pci_ep_cfs_remove_epf_group(group);
153 list_del(&driver->epf_group);
154 mutex_unlock(&pci_epf_mutex);
147 driver_unregister(&driver->driver); 155 driver_unregister(&driver->driver);
148} 156}
149EXPORT_SYMBOL_GPL(pci_epf_unregister_driver); 157EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
@@ -159,6 +167,8 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
159 struct module *owner) 167 struct module *owner)
160{ 168{
161 int ret; 169 int ret;
170 struct config_group *group;
171 const struct pci_epf_device_id *id;
162 172
163 if (!driver->ops) 173 if (!driver->ops)
164 return -EINVAL; 174 return -EINVAL;
@@ -173,7 +183,16 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
173 if (ret) 183 if (ret)
174 return ret; 184 return ret;
175 185
176 driver->group = pci_ep_cfs_add_epf_group(driver->driver.name); 186 INIT_LIST_HEAD(&driver->epf_group);
187
188 id = driver->id_table;
189 while (id->name[0]) {
190 group = pci_ep_cfs_add_epf_group(id->name);
191 mutex_lock(&pci_epf_mutex);
192 list_add_tail(&group->group_entry, &driver->epf_group);
193 mutex_unlock(&pci_epf_mutex);
194 id++;
195 }
177 196
178 return 0; 197 return 0;
179} 198}
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 0d0177ce436c..a96e23bda664 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,13 +5,14 @@ menu "PCI host controller drivers"
5 5
6config PCI_MVEBU 6config PCI_MVEBU
7 bool "Marvell EBU PCIe controller" 7 bool "Marvell EBU PCIe controller"
8 depends on ARCH_MVEBU || ARCH_DOVE 8 depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
9 depends on MVEBU_MBUS
9 depends on ARM 10 depends on ARM
10 depends on OF 11 depends on OF
11 12
12config PCI_AARDVARK 13config PCI_AARDVARK
13 bool "Aardvark PCIe controller" 14 bool "Aardvark PCIe controller"
14 depends on ARCH_MVEBU && ARM64 15 depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
15 depends on OF 16 depends on OF
16 depends on PCI_MSI_IRQ_DOMAIN 17 depends on PCI_MSI_IRQ_DOMAIN
17 help 18 help
@@ -21,7 +22,7 @@ config PCI_AARDVARK
21 22
22config PCIE_XILINX_NWL 23config PCIE_XILINX_NWL
23 bool "NWL PCIe Core" 24 bool "NWL PCIe Core"
24 depends on ARCH_ZYNQMP 25 depends on ARCH_ZYNQMP || COMPILE_TEST
25 depends on PCI_MSI_IRQ_DOMAIN 26 depends on PCI_MSI_IRQ_DOMAIN
26 help 27 help
27 Say 'Y' here if you want kernel support for Xilinx 28 Say 'Y' here if you want kernel support for Xilinx
@@ -32,12 +33,11 @@ config PCIE_XILINX_NWL
32config PCI_FTPCI100 33config PCI_FTPCI100
33 bool "Faraday Technology FTPCI100 PCI controller" 34 bool "Faraday Technology FTPCI100 PCI controller"
34 depends on OF 35 depends on OF
35 depends on ARM
36 default ARCH_GEMINI 36 default ARCH_GEMINI
37 37
38config PCI_TEGRA 38config PCI_TEGRA
39 bool "NVIDIA Tegra PCIe controller" 39 bool "NVIDIA Tegra PCIe controller"
40 depends on ARCH_TEGRA 40 depends on ARCH_TEGRA || COMPILE_TEST
41 depends on PCI_MSI_IRQ_DOMAIN 41 depends on PCI_MSI_IRQ_DOMAIN
42 help 42 help
43 Say Y here if you want support for the PCIe host controller found 43 Say Y here if you want support for the PCIe host controller found
@@ -45,8 +45,8 @@ config PCI_TEGRA
45 45
46config PCI_RCAR_GEN2 46config PCI_RCAR_GEN2
47 bool "Renesas R-Car Gen2 Internal PCI controller" 47 bool "Renesas R-Car Gen2 Internal PCI controller"
48 depends on ARM
49 depends on ARCH_RENESAS || COMPILE_TEST 48 depends on ARCH_RENESAS || COMPILE_TEST
49 depends on ARM
50 help 50 help
51 Say Y here if you want internal PCI support on R-Car Gen2 SoC. 51 Say Y here if you want internal PCI support on R-Car Gen2 SoC.
52 There are 3 internal PCI controllers available with a single 52 There are 3 internal PCI controllers available with a single
@@ -54,7 +54,7 @@ config PCI_RCAR_GEN2
54 54
55config PCIE_RCAR 55config PCIE_RCAR
56 bool "Renesas R-Car PCIe controller" 56 bool "Renesas R-Car PCIe controller"
57 depends on ARCH_RENESAS || (ARM && COMPILE_TEST) 57 depends on ARCH_RENESAS || COMPILE_TEST
58 depends on PCI_MSI_IRQ_DOMAIN 58 depends on PCI_MSI_IRQ_DOMAIN
59 help 59 help
60 Say Y here if you want PCIe controller support on R-Car SoCs. 60 Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -65,25 +65,25 @@ config PCI_HOST_COMMON
65 65
66config PCI_HOST_GENERIC 66config PCI_HOST_GENERIC
67 bool "Generic PCI host controller" 67 bool "Generic PCI host controller"
68 depends on (ARM || ARM64) && OF 68 depends on OF
69 select PCI_HOST_COMMON 69 select PCI_HOST_COMMON
70 select IRQ_DOMAIN 70 select IRQ_DOMAIN
71 select PCI_DOMAINS
71 help 72 help
72 Say Y here if you want to support a simple generic PCI host 73 Say Y here if you want to support a simple generic PCI host
73 controller, such as the one emulated by kvmtool. 74 controller, such as the one emulated by kvmtool.
74 75
75config PCIE_XILINX 76config PCIE_XILINX
76 bool "Xilinx AXI PCIe host bridge support" 77 bool "Xilinx AXI PCIe host bridge support"
77 depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) 78 depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
78 help 79 help
79 Say 'Y' here if you want kernel to support the Xilinx AXI PCIe 80 Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
80 Host Bridge driver. 81 Host Bridge driver.
81 82
82config PCI_XGENE 83config PCI_XGENE
83 bool "X-Gene PCIe controller" 84 bool "X-Gene PCIe controller"
84 depends on ARM64 85 depends on ARM64 || COMPILE_TEST
85 depends on OF || (ACPI && PCI_QUIRKS) 86 depends on OF || (ACPI && PCI_QUIRKS)
86 select PCIEPORTBUS
87 help 87 help
88 Say Y here if you want internal PCI support on APM X-Gene SoC. 88 Say Y here if you want internal PCI support on APM X-Gene SoC.
89 There are 5 internal PCIe ports available. Each port is GEN3 capable 89 There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -101,7 +101,7 @@ config PCI_XGENE_MSI
101config PCI_V3_SEMI 101config PCI_V3_SEMI
102 bool "V3 Semiconductor PCI controller" 102 bool "V3 Semiconductor PCI controller"
103 depends on OF 103 depends on OF
104 depends on ARM 104 depends on ARM || COMPILE_TEST
105 default ARCH_INTEGRATOR_AP 105 default ARCH_INTEGRATOR_AP
106 106
107config PCI_VERSATILE 107config PCI_VERSATILE
@@ -147,8 +147,7 @@ config PCIE_IPROC_MSI
147 147
148config PCIE_ALTERA 148config PCIE_ALTERA
149 bool "Altera PCIe controller" 149 bool "Altera PCIe controller"
150 depends on ARM || NIOS2 150 depends on ARM || NIOS2 || COMPILE_TEST
151 depends on OF_PCI
152 select PCI_DOMAINS 151 select PCI_DOMAINS
153 help 152 help
154 Say Y here if you want to enable PCIe controller support on Altera 153 Say Y here if you want to enable PCIe controller support on Altera
@@ -164,7 +163,7 @@ config PCIE_ALTERA_MSI
164 163
165config PCI_HOST_THUNDER_PEM 164config PCI_HOST_THUNDER_PEM
166 bool "Cavium Thunder PCIe controller to off-chip devices" 165 bool "Cavium Thunder PCIe controller to off-chip devices"
167 depends on ARM64 166 depends on ARM64 || COMPILE_TEST
168 depends on OF || (ACPI && PCI_QUIRKS) 167 depends on OF || (ACPI && PCI_QUIRKS)
169 select PCI_HOST_COMMON 168 select PCI_HOST_COMMON
170 help 169 help
@@ -172,29 +171,45 @@ config PCI_HOST_THUNDER_PEM
172 171
173config PCI_HOST_THUNDER_ECAM 172config PCI_HOST_THUNDER_ECAM
174 bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon" 173 bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
175 depends on ARM64 174 depends on ARM64 || COMPILE_TEST
176 depends on OF || (ACPI && PCI_QUIRKS) 175 depends on OF || (ACPI && PCI_QUIRKS)
177 select PCI_HOST_COMMON 176 select PCI_HOST_COMMON
178 help 177 help
179 Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. 178 Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
180 179
181config PCIE_ROCKCHIP 180config PCIE_ROCKCHIP
182 tristate "Rockchip PCIe controller" 181 bool
182 depends on PCI
183
184config PCIE_ROCKCHIP_HOST
185 tristate "Rockchip PCIe host controller"
183 depends on ARCH_ROCKCHIP || COMPILE_TEST 186 depends on ARCH_ROCKCHIP || COMPILE_TEST
184 depends on OF 187 depends on OF
185 depends on PCI_MSI_IRQ_DOMAIN 188 depends on PCI_MSI_IRQ_DOMAIN
186 select MFD_SYSCON 189 select MFD_SYSCON
190 select PCIE_ROCKCHIP
187 help 191 help
188 Say Y here if you want internal PCI support on Rockchip SoC. 192 Say Y here if you want internal PCI support on Rockchip SoC.
189 There is 1 internal PCIe port available to support GEN2 with 193 There is 1 internal PCIe port available to support GEN2 with
190 4 slots. 194 4 slots.
191 195
196config PCIE_ROCKCHIP_EP
197 bool "Rockchip PCIe endpoint controller"
198 depends on ARCH_ROCKCHIP || COMPILE_TEST
199 depends on OF
200 depends on PCI_ENDPOINT
201 select MFD_SYSCON
202 select PCIE_ROCKCHIP
203 help
204 Say Y here if you want to support Rockchip PCIe controller in
205 endpoint mode on Rockchip SoC. There is 1 internal PCIe port
206 available to support GEN2 with 4 slots.
207
192config PCIE_MEDIATEK 208config PCIE_MEDIATEK
193 bool "MediaTek PCIe controller" 209 bool "MediaTek PCIe controller"
194 depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST) 210 depends on ARCH_MEDIATEK || COMPILE_TEST
195 depends on OF 211 depends on OF
196 depends on PCI 212 depends on PCI_MSI_IRQ_DOMAIN
197 select PCIEPORTBUS
198 help 213 help
199 Say Y here if you want to enable PCIe controller support on 214 Say Y here if you want to enable PCIe controller support on
200 MediaTek SoCs. 215 MediaTek SoCs.
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 3b1059190867..11d21b026d37 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
20obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o 20obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
21obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o 21obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
22obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o 22obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
23obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
24obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
23obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o 25obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
24obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o 26obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
25obj-$(CONFIG_VMD) += vmd.o 27obj-$(CONFIG_VMD) += vmd.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 9abf549631b4..d3172d5d3d35 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -19,6 +19,8 @@
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <linux/of_pci.h> 20#include <linux/of_pci.h>
21 21
22#include "../pci.h"
23
22/* PCIe core registers */ 24/* PCIe core registers */
23#define PCIE_CORE_CMD_STATUS_REG 0x4 25#define PCIE_CORE_CMD_STATUS_REG 0x4
24#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) 26#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
@@ -822,14 +824,13 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
822{ 824{
823 int err, res_valid = 0; 825 int err, res_valid = 0;
824 struct device *dev = &pcie->pdev->dev; 826 struct device *dev = &pcie->pdev->dev;
825 struct device_node *np = dev->of_node;
826 struct resource_entry *win, *tmp; 827 struct resource_entry *win, *tmp;
827 resource_size_t iobase; 828 resource_size_t iobase;
828 829
829 INIT_LIST_HEAD(&pcie->resources); 830 INIT_LIST_HEAD(&pcie->resources);
830 831
831 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources, 832 err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
832 &iobase); 833 &pcie->resources, &iobase);
833 if (err) 834 if (err)
834 return err; 835 return err;
835 836
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5008fd87956a..a1ebe9ed441f 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -28,6 +28,8 @@
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <linux/clk.h> 29#include <linux/clk.h>
30 30
31#include "../pci.h"
32
31/* 33/*
32 * Special configuration registers directly in the first few words 34 * Special configuration registers directly in the first few words
33 * in I/O space. 35 * in I/O space.
@@ -476,8 +478,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
476 if (IS_ERR(p->base)) 478 if (IS_ERR(p->base))
477 return PTR_ERR(p->base); 479 return PTR_ERR(p->base);
478 480
479 ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, 481 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
480 &res, &io_base); 482 &res, &io_base);
481 if (ret) 483 if (ret)
482 return ret; 484 return ret;
483 485
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 5d028f53fdcd..d8f10451f273 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -101,5 +101,18 @@ int pci_host_common_probe(struct platform_device *pdev,
101 return ret; 101 return ret;
102 } 102 }
103 103
104 platform_set_drvdata(pdev, bridge->bus);
105 return 0;
106}
107
108int pci_host_common_remove(struct platform_device *pdev)
109{
110 struct pci_bus *bus = platform_get_drvdata(pdev);
111
112 pci_lock_rescan_remove();
113 pci_stop_root_bus(bus);
114 pci_remove_root_bus(bus);
115 pci_unlock_rescan_remove();
116
104 return 0; 117 return 0;
105} 118}
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 45319ee3b484..dea3ec7592a2 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -95,5 +95,6 @@ static struct platform_driver gen_pci_driver = {
95 .suppress_bind_attrs = true, 95 .suppress_bind_attrs = true,
96 }, 96 },
97 .probe = gen_pci_probe, 97 .probe = gen_pci_probe,
98 .remove = pci_host_common_remove,
98}; 99};
99builtin_platform_driver(gen_pci_driver); 100builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 50cdefe3f6d3..6cc5036ac83c 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -433,7 +433,7 @@ enum hv_pcibus_state {
433struct hv_pcibus_device { 433struct hv_pcibus_device {
434 struct pci_sysdata sysdata; 434 struct pci_sysdata sysdata;
435 enum hv_pcibus_state state; 435 enum hv_pcibus_state state;
436 atomic_t remove_lock; 436 refcount_t remove_lock;
437 struct hv_device *hdev; 437 struct hv_device *hdev;
438 resource_size_t low_mmio_space; 438 resource_size_t low_mmio_space;
439 resource_size_t high_mmio_space; 439 resource_size_t high_mmio_space;
@@ -488,17 +488,6 @@ enum hv_pcichild_state {
488 hv_pcichild_maximum 488 hv_pcichild_maximum
489}; 489};
490 490
491enum hv_pcidev_ref_reason {
492 hv_pcidev_ref_invalid = 0,
493 hv_pcidev_ref_initial,
494 hv_pcidev_ref_by_slot,
495 hv_pcidev_ref_packet,
496 hv_pcidev_ref_pnp,
497 hv_pcidev_ref_childlist,
498 hv_pcidev_irqdata,
499 hv_pcidev_ref_max
500};
501
502struct hv_pci_dev { 491struct hv_pci_dev {
503 /* List protected by pci_rescan_remove_lock */ 492 /* List protected by pci_rescan_remove_lock */
504 struct list_head list_entry; 493 struct list_head list_entry;
@@ -548,14 +537,41 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
548 537
549static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, 538static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
550 u32 wslot); 539 u32 wslot);
551static void get_pcichild(struct hv_pci_dev *hv_pcidev, 540
552 enum hv_pcidev_ref_reason reason); 541static void get_pcichild(struct hv_pci_dev *hpdev)
553static void put_pcichild(struct hv_pci_dev *hv_pcidev, 542{
554 enum hv_pcidev_ref_reason reason); 543 refcount_inc(&hpdev->refs);
544}
545
546static void put_pcichild(struct hv_pci_dev *hpdev)
547{
548 if (refcount_dec_and_test(&hpdev->refs))
549 kfree(hpdev);
550}
555 551
556static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); 552static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
557static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); 553static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
558 554
555/*
556 * There is no good way to get notified from vmbus_onoffer_rescind(),
557 * so let's use polling here, since this is not a hot path.
558 */
559static int wait_for_response(struct hv_device *hdev,
560 struct completion *comp)
561{
562 while (true) {
563 if (hdev->channel->rescind) {
564 dev_warn_once(&hdev->device, "The device is gone.\n");
565 return -ENODEV;
566 }
567
568 if (wait_for_completion_timeout(comp, HZ / 10))
569 break;
570 }
571
572 return 0;
573}
574
559/** 575/**
560 * devfn_to_wslot() - Convert from Linux PCI slot to Windows 576 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
561 * @devfn: The Linux representation of PCI slot 577 * @devfn: The Linux representation of PCI slot
@@ -762,7 +778,7 @@ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
762 778
763 _hv_pcifront_read_config(hpdev, where, size, val); 779 _hv_pcifront_read_config(hpdev, where, size, val);
764 780
765 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 781 put_pcichild(hpdev);
766 return PCIBIOS_SUCCESSFUL; 782 return PCIBIOS_SUCCESSFUL;
767} 783}
768 784
@@ -790,7 +806,7 @@ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
790 806
791 _hv_pcifront_write_config(hpdev, where, size, val); 807 _hv_pcifront_write_config(hpdev, where, size, val);
792 808
793 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 809 put_pcichild(hpdev);
794 return PCIBIOS_SUCCESSFUL; 810 return PCIBIOS_SUCCESSFUL;
795} 811}
796 812
@@ -856,7 +872,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
856 } 872 }
857 873
858 hv_int_desc_free(hpdev, int_desc); 874 hv_int_desc_free(hpdev, int_desc);
859 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 875 put_pcichild(hpdev);
860} 876}
861 877
862static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest, 878static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
@@ -1186,13 +1202,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1186 msg->address_lo = comp.int_desc.address & 0xffffffff; 1202 msg->address_lo = comp.int_desc.address & 0xffffffff;
1187 msg->data = comp.int_desc.data; 1203 msg->data = comp.int_desc.data;
1188 1204
1189 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 1205 put_pcichild(hpdev);
1190 return; 1206 return;
1191 1207
1192free_int_desc: 1208free_int_desc:
1193 kfree(int_desc); 1209 kfree(int_desc);
1194drop_reference: 1210drop_reference:
1195 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 1211 put_pcichild(hpdev);
1196return_null_message: 1212return_null_message:
1197 msg->address_hi = 0; 1213 msg->address_hi = 0;
1198 msg->address_lo = 0; 1214 msg->address_lo = 0;
@@ -1283,7 +1299,6 @@ static u64 get_bar_size(u64 bar_val)
1283 */ 1299 */
1284static void survey_child_resources(struct hv_pcibus_device *hbus) 1300static void survey_child_resources(struct hv_pcibus_device *hbus)
1285{ 1301{
1286 struct list_head *iter;
1287 struct hv_pci_dev *hpdev; 1302 struct hv_pci_dev *hpdev;
1288 resource_size_t bar_size = 0; 1303 resource_size_t bar_size = 0;
1289 unsigned long flags; 1304 unsigned long flags;
@@ -1309,8 +1324,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
1309 * for a child device are a power of 2 in size and aligned in memory, 1324 * for a child device are a power of 2 in size and aligned in memory,
1310 * so it's sufficient to just add them up without tracking alignment. 1325 * so it's sufficient to just add them up without tracking alignment.
1311 */ 1326 */
1312 list_for_each(iter, &hbus->children) { 1327 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1313 hpdev = container_of(iter, struct hv_pci_dev, list_entry);
1314 for (i = 0; i < 6; i++) { 1328 for (i = 0; i < 6; i++) {
1315 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO) 1329 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
1316 dev_err(&hbus->hdev->device, 1330 dev_err(&hbus->hdev->device,
@@ -1363,7 +1377,6 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
1363 resource_size_t low_base = 0; 1377 resource_size_t low_base = 0;
1364 resource_size_t bar_size; 1378 resource_size_t bar_size;
1365 struct hv_pci_dev *hpdev; 1379 struct hv_pci_dev *hpdev;
1366 struct list_head *iter;
1367 unsigned long flags; 1380 unsigned long flags;
1368 u64 bar_val; 1381 u64 bar_val;
1369 u32 command; 1382 u32 command;
@@ -1385,9 +1398,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
1385 1398
1386 /* Pick addresses for the BARs. */ 1399 /* Pick addresses for the BARs. */
1387 do { 1400 do {
1388 list_for_each(iter, &hbus->children) { 1401 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1389 hpdev = container_of(iter, struct hv_pci_dev,
1390 list_entry);
1391 for (i = 0; i < 6; i++) { 1402 for (i = 0; i < 6; i++) {
1392 bar_val = hpdev->probed_bar[i]; 1403 bar_val = hpdev->probed_bar[i];
1393 if (bar_val == 0) 1404 if (bar_val == 0)
@@ -1508,19 +1519,6 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
1508 complete(&completion->host_event); 1519 complete(&completion->host_event);
1509} 1520}
1510 1521
1511static void get_pcichild(struct hv_pci_dev *hpdev,
1512 enum hv_pcidev_ref_reason reason)
1513{
1514 refcount_inc(&hpdev->refs);
1515}
1516
1517static void put_pcichild(struct hv_pci_dev *hpdev,
1518 enum hv_pcidev_ref_reason reason)
1519{
1520 if (refcount_dec_and_test(&hpdev->refs))
1521 kfree(hpdev);
1522}
1523
1524/** 1522/**
1525 * new_pcichild_device() - Create a new child device 1523 * new_pcichild_device() - Create a new child device
1526 * @hbus: The internal struct tracking this root PCI bus. 1524 * @hbus: The internal struct tracking this root PCI bus.
@@ -1568,24 +1566,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
1568 if (ret) 1566 if (ret)
1569 goto error; 1567 goto error;
1570 1568
1571 wait_for_completion(&comp_pkt.host_event); 1569 if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
1570 goto error;
1572 1571
1573 hpdev->desc = *desc; 1572 hpdev->desc = *desc;
1574 refcount_set(&hpdev->refs, 1); 1573 refcount_set(&hpdev->refs, 1);
1575 get_pcichild(hpdev, hv_pcidev_ref_childlist); 1574 get_pcichild(hpdev);
1576 spin_lock_irqsave(&hbus->device_list_lock, flags); 1575 spin_lock_irqsave(&hbus->device_list_lock, flags);
1577 1576
1578 /*
1579 * When a device is being added to the bus, we set the PCI domain
1580 * number to be the device serial number, which is non-zero and
1581 * unique on the same VM. The serial numbers start with 1, and
1582 * increase by 1 for each device. So device names including this
1583 * can have shorter names than based on the bus instance UUID.
1584 * Only the first device serial number is used for domain, so the
1585 * domain number will not change after the first device is added.
1586 */
1587 if (list_empty(&hbus->children))
1588 hbus->sysdata.domain = desc->ser;
1589 list_add_tail(&hpdev->list_entry, &hbus->children); 1577 list_add_tail(&hpdev->list_entry, &hbus->children);
1590 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 1578 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1591 return hpdev; 1579 return hpdev;
@@ -1618,7 +1606,7 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
1618 list_for_each_entry(iter, &hbus->children, list_entry) { 1606 list_for_each_entry(iter, &hbus->children, list_entry) {
1619 if (iter->desc.win_slot.slot == wslot) { 1607 if (iter->desc.win_slot.slot == wslot) {
1620 hpdev = iter; 1608 hpdev = iter;
1621 get_pcichild(hpdev, hv_pcidev_ref_by_slot); 1609 get_pcichild(hpdev);
1622 break; 1610 break;
1623 } 1611 }
1624 } 1612 }
@@ -1654,7 +1642,6 @@ static void pci_devices_present_work(struct work_struct *work)
1654{ 1642{
1655 u32 child_no; 1643 u32 child_no;
1656 bool found; 1644 bool found;
1657 struct list_head *iter;
1658 struct pci_function_description *new_desc; 1645 struct pci_function_description *new_desc;
1659 struct hv_pci_dev *hpdev; 1646 struct hv_pci_dev *hpdev;
1660 struct hv_pcibus_device *hbus; 1647 struct hv_pcibus_device *hbus;
@@ -1691,10 +1678,8 @@ static void pci_devices_present_work(struct work_struct *work)
1691 1678
1692 /* First, mark all existing children as reported missing. */ 1679 /* First, mark all existing children as reported missing. */
1693 spin_lock_irqsave(&hbus->device_list_lock, flags); 1680 spin_lock_irqsave(&hbus->device_list_lock, flags);
1694 list_for_each(iter, &hbus->children) { 1681 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1695 hpdev = container_of(iter, struct hv_pci_dev, 1682 hpdev->reported_missing = true;
1696 list_entry);
1697 hpdev->reported_missing = true;
1698 } 1683 }
1699 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 1684 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1700 1685
@@ -1704,11 +1689,8 @@ static void pci_devices_present_work(struct work_struct *work)
1704 new_desc = &dr->func[child_no]; 1689 new_desc = &dr->func[child_no];
1705 1690
1706 spin_lock_irqsave(&hbus->device_list_lock, flags); 1691 spin_lock_irqsave(&hbus->device_list_lock, flags);
1707 list_for_each(iter, &hbus->children) { 1692 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1708 hpdev = container_of(iter, struct hv_pci_dev, 1693 if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
1709 list_entry);
1710 if ((hpdev->desc.win_slot.slot ==
1711 new_desc->win_slot.slot) &&
1712 (hpdev->desc.v_id == new_desc->v_id) && 1694 (hpdev->desc.v_id == new_desc->v_id) &&
1713 (hpdev->desc.d_id == new_desc->d_id) && 1695 (hpdev->desc.d_id == new_desc->d_id) &&
1714 (hpdev->desc.ser == new_desc->ser)) { 1696 (hpdev->desc.ser == new_desc->ser)) {
@@ -1730,12 +1712,10 @@ static void pci_devices_present_work(struct work_struct *work)
1730 spin_lock_irqsave(&hbus->device_list_lock, flags); 1712 spin_lock_irqsave(&hbus->device_list_lock, flags);
1731 do { 1713 do {
1732 found = false; 1714 found = false;
1733 list_for_each(iter, &hbus->children) { 1715 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1734 hpdev = container_of(iter, struct hv_pci_dev,
1735 list_entry);
1736 if (hpdev->reported_missing) { 1716 if (hpdev->reported_missing) {
1737 found = true; 1717 found = true;
1738 put_pcichild(hpdev, hv_pcidev_ref_childlist); 1718 put_pcichild(hpdev);
1739 list_move_tail(&hpdev->list_entry, &removed); 1719 list_move_tail(&hpdev->list_entry, &removed);
1740 break; 1720 break;
1741 } 1721 }
@@ -1748,7 +1728,7 @@ static void pci_devices_present_work(struct work_struct *work)
1748 hpdev = list_first_entry(&removed, struct hv_pci_dev, 1728 hpdev = list_first_entry(&removed, struct hv_pci_dev,
1749 list_entry); 1729 list_entry);
1750 list_del(&hpdev->list_entry); 1730 list_del(&hpdev->list_entry);
1751 put_pcichild(hpdev, hv_pcidev_ref_initial); 1731 put_pcichild(hpdev);
1752 } 1732 }
1753 1733
1754 switch (hbus->state) { 1734 switch (hbus->state) {
@@ -1883,8 +1863,8 @@ static void hv_eject_device_work(struct work_struct *work)
1883 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, 1863 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
1884 VM_PKT_DATA_INBAND, 0); 1864 VM_PKT_DATA_INBAND, 0);
1885 1865
1886 put_pcichild(hpdev, hv_pcidev_ref_childlist); 1866 put_pcichild(hpdev);
1887 put_pcichild(hpdev, hv_pcidev_ref_pnp); 1867 put_pcichild(hpdev);
1888 put_hvpcibus(hpdev->hbus); 1868 put_hvpcibus(hpdev->hbus);
1889} 1869}
1890 1870
@@ -1899,7 +1879,7 @@ static void hv_eject_device_work(struct work_struct *work)
1899static void hv_pci_eject_device(struct hv_pci_dev *hpdev) 1879static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
1900{ 1880{
1901 hpdev->state = hv_pcichild_ejecting; 1881 hpdev->state = hv_pcichild_ejecting;
1902 get_pcichild(hpdev, hv_pcidev_ref_pnp); 1882 get_pcichild(hpdev);
1903 INIT_WORK(&hpdev->wrk, hv_eject_device_work); 1883 INIT_WORK(&hpdev->wrk, hv_eject_device_work);
1904 get_hvpcibus(hpdev->hbus); 1884 get_hvpcibus(hpdev->hbus);
1905 queue_work(hpdev->hbus->wq, &hpdev->wrk); 1885 queue_work(hpdev->hbus->wq, &hpdev->wrk);
@@ -1999,8 +1979,7 @@ static void hv_pci_onchannelcallback(void *context)
1999 dev_message->wslot.slot); 1979 dev_message->wslot.slot);
2000 if (hpdev) { 1980 if (hpdev) {
2001 hv_pci_eject_device(hpdev); 1981 hv_pci_eject_device(hpdev);
2002 put_pcichild(hpdev, 1982 put_pcichild(hpdev);
2003 hv_pcidev_ref_by_slot);
2004 } 1983 }
2005 break; 1984 break;
2006 1985
@@ -2069,15 +2048,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
2069 sizeof(struct pci_version_request), 2048 sizeof(struct pci_version_request),
2070 (unsigned long)pkt, VM_PKT_DATA_INBAND, 2049 (unsigned long)pkt, VM_PKT_DATA_INBAND,
2071 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 2050 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2051 if (!ret)
2052 ret = wait_for_response(hdev, &comp_pkt.host_event);
2053
2072 if (ret) { 2054 if (ret) {
2073 dev_err(&hdev->device, 2055 dev_err(&hdev->device,
2074 "PCI Pass-through VSP failed sending version reqquest: %#x", 2056 "PCI Pass-through VSP failed to request version: %d",
2075 ret); 2057 ret);
2076 goto exit; 2058 goto exit;
2077 } 2059 }
2078 2060
2079 wait_for_completion(&comp_pkt.host_event);
2080
2081 if (comp_pkt.completion_status >= 0) { 2061 if (comp_pkt.completion_status >= 0) {
2082 pci_protocol_version = pci_protocol_versions[i]; 2062 pci_protocol_version = pci_protocol_versions[i];
2083 dev_info(&hdev->device, 2063 dev_info(&hdev->device,
@@ -2286,11 +2266,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
2286 ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), 2266 ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
2287 (unsigned long)pkt, VM_PKT_DATA_INBAND, 2267 (unsigned long)pkt, VM_PKT_DATA_INBAND,
2288 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 2268 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2269 if (!ret)
2270 ret = wait_for_response(hdev, &comp_pkt.host_event);
2271
2289 if (ret) 2272 if (ret)
2290 goto exit; 2273 goto exit;
2291 2274
2292 wait_for_completion(&comp_pkt.host_event);
2293
2294 if (comp_pkt.completion_status < 0) { 2275 if (comp_pkt.completion_status < 0) {
2295 dev_err(&hdev->device, 2276 dev_err(&hdev->device,
2296 "PCI Pass-through VSP failed D0 Entry with status %x\n", 2277 "PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2330,11 +2311,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
2330 2311
2331 ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), 2312 ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
2332 0, VM_PKT_DATA_INBAND, 0); 2313 0, VM_PKT_DATA_INBAND, 0);
2333 if (ret) 2314 if (!ret)
2334 return ret; 2315 ret = wait_for_response(hdev, &comp);
2335 2316
2336 wait_for_completion(&comp); 2317 return ret;
2337 return 0;
2338} 2318}
2339 2319
2340/** 2320/**
@@ -2398,17 +2378,17 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
2398 PCI_RESOURCES_ASSIGNED2; 2378 PCI_RESOURCES_ASSIGNED2;
2399 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; 2379 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
2400 } 2380 }
2401 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 2381 put_pcichild(hpdev);
2402 2382
2403 ret = vmbus_sendpacket(hdev->channel, &pkt->message, 2383 ret = vmbus_sendpacket(hdev->channel, &pkt->message,
2404 size_res, (unsigned long)pkt, 2384 size_res, (unsigned long)pkt,
2405 VM_PKT_DATA_INBAND, 2385 VM_PKT_DATA_INBAND,
2406 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 2386 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2387 if (!ret)
2388 ret = wait_for_response(hdev, &comp_pkt.host_event);
2407 if (ret) 2389 if (ret)
2408 break; 2390 break;
2409 2391
2410 wait_for_completion(&comp_pkt.host_event);
2411
2412 if (comp_pkt.completion_status < 0) { 2392 if (comp_pkt.completion_status < 0) {
2413 ret = -EPROTO; 2393 ret = -EPROTO;
2414 dev_err(&hdev->device, 2394 dev_err(&hdev->device,
@@ -2446,7 +2426,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
2446 pkt.message_type.type = PCI_RESOURCES_RELEASED; 2426 pkt.message_type.type = PCI_RESOURCES_RELEASED;
2447 pkt.wslot.slot = hpdev->desc.win_slot.slot; 2427 pkt.wslot.slot = hpdev->desc.win_slot.slot;
2448 2428
2449 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 2429 put_pcichild(hpdev);
2450 2430
2451 ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0, 2431 ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
2452 VM_PKT_DATA_INBAND, 0); 2432 VM_PKT_DATA_INBAND, 0);
@@ -2459,12 +2439,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
2459 2439
2460static void get_hvpcibus(struct hv_pcibus_device *hbus) 2440static void get_hvpcibus(struct hv_pcibus_device *hbus)
2461{ 2441{
2462 atomic_inc(&hbus->remove_lock); 2442 refcount_inc(&hbus->remove_lock);
2463} 2443}
2464 2444
2465static void put_hvpcibus(struct hv_pcibus_device *hbus) 2445static void put_hvpcibus(struct hv_pcibus_device *hbus)
2466{ 2446{
2467 if (atomic_dec_and_test(&hbus->remove_lock)) 2447 if (refcount_dec_and_test(&hbus->remove_lock))
2468 complete(&hbus->remove_event); 2448 complete(&hbus->remove_event);
2469} 2449}
2470 2450
@@ -2508,7 +2488,7 @@ static int hv_pci_probe(struct hv_device *hdev,
2508 hdev->dev_instance.b[8] << 8; 2488 hdev->dev_instance.b[8] << 8;
2509 2489
2510 hbus->hdev = hdev; 2490 hbus->hdev = hdev;
2511 atomic_inc(&hbus->remove_lock); 2491 refcount_set(&hbus->remove_lock, 1);
2512 INIT_LIST_HEAD(&hbus->children); 2492 INIT_LIST_HEAD(&hbus->children);
2513 INIT_LIST_HEAD(&hbus->dr_list); 2493 INIT_LIST_HEAD(&hbus->dr_list);
2514 INIT_LIST_HEAD(&hbus->resources_for_children); 2494 INIT_LIST_HEAD(&hbus->resources_for_children);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 5d4dccfc9d81..23e270839e6a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -21,6 +21,8 @@
21#include <linux/of_pci.h> 21#include <linux/of_pci.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23 23
24#include "../pci.h"
25
24/* 26/*
25 * PCIe unit register offsets. 27 * PCIe unit register offsets.
26 */ 28 */
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index dd4f1a6b57c5..326171cb1a97 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -21,6 +21,8 @@
21#include <linux/sizes.h> 21#include <linux/sizes.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#include "../pci.h"
25
24/* AHB-PCI Bridge PCI communication registers */ 26/* AHB-PCI Bridge PCI communication registers */
25#define RCAR_AHBPCI_PCICOM_OFFSET 0x800 27#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
26 28
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 389e74be846c..f4f53d092e00 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -40,6 +40,8 @@
40#include <soc/tegra/cpuidle.h> 40#include <soc/tegra/cpuidle.h>
41#include <soc/tegra/pmc.h> 41#include <soc/tegra/pmc.h>
42 42
43#include "../pci.h"
44
43#define INT_PCI_MSI_NR (8 * 32) 45#define INT_PCI_MSI_NR (8 * 32)
44 46
45/* register definitions */ 47/* register definitions */
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
index 0a4dea796663..68b8bfbdb867 100644
--- a/drivers/pci/host/pci-v3-semi.c
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -33,6 +33,8 @@
33#include <linux/regmap.h> 33#include <linux/regmap.h>
34#include <linux/clk.h> 34#include <linux/clk.h>
35 35
36#include "../pci.h"
37
36#define V3_PCI_VENDOR 0x00000000 38#define V3_PCI_VENDOR 0x00000000
37#define V3_PCI_DEVICE 0x00000002 39#define V3_PCI_DEVICE 0x00000002
38#define V3_PCI_CMD 0x00000004 40#define V3_PCI_CMD 0x00000004
@@ -791,7 +793,8 @@ static int v3_pci_probe(struct platform_device *pdev)
791 if (IS_ERR(v3->config_base)) 793 if (IS_ERR(v3->config_base))
792 return PTR_ERR(v3->config_base); 794 return PTR_ERR(v3->config_base);
793 795
794 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base); 796 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
797 &io_base);
795 if (ret) 798 if (ret)
796 return ret; 799 return ret;
797 800
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5b3876f5312b..994f32061b32 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -15,6 +15,8 @@
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17 17
18#include "../pci.h"
19
18static void __iomem *versatile_pci_base; 20static void __iomem *versatile_pci_base;
19static void __iomem *versatile_cfg_base[2]; 21static void __iomem *versatile_cfg_base[2];
20 22
@@ -64,11 +66,10 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
64 struct list_head *res) 66 struct list_head *res)
65{ 67{
66 int err, mem = 1, res_valid = 0; 68 int err, mem = 1, res_valid = 0;
67 struct device_node *np = dev->of_node;
68 resource_size_t iobase; 69 resource_size_t iobase;
69 struct resource_entry *win, *tmp; 70 struct resource_entry *win, *tmp;
70 71
71 err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase); 72 err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
72 if (err) 73 if (err)
73 return err; 74 return err;
74 75
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0a0d7ee6d3c9..d854d67e873c 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -22,6 +22,8 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include "../pci.h"
26
25#define PCIECORE_CTLANDSTATUS 0x50 27#define PCIECORE_CTLANDSTATUS 0x50
26#define PIM1_1L 0x80 28#define PIM1_1L 0x80
27#define IBAR2 0x98 29#define IBAR2 0x98
@@ -632,7 +634,8 @@ static int xgene_pcie_probe(struct platform_device *pdev)
632 if (ret) 634 if (ret)
633 return ret; 635 return ret;
634 636
635 ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase); 637 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
638 &iobase);
636 if (ret) 639 if (ret)
637 return ret; 640 return ret;
638 641
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index a6af62e0256d..7d05e51205b3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -17,6 +17,8 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19 19
20#include "../pci.h"
21
20#define RP_TX_REG0 0x2000 22#define RP_TX_REG0 0x2000
21#define RP_TX_REG1 0x2004 23#define RP_TX_REG1 0x2004
22#define RP_TX_CNTRL 0x2008 24#define RP_TX_CNTRL 0x2008
@@ -488,11 +490,10 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
488{ 490{
489 int err, res_valid = 0; 491 int err, res_valid = 0;
490 struct device *dev = &pcie->pdev->dev; 492 struct device *dev = &pcie->pdev->dev;
491 struct device_node *np = dev->of_node;
492 struct resource_entry *win; 493 struct resource_entry *win;
493 494
494 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources, 495 err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
495 NULL); 496 &pcie->resources, NULL);
496 if (err) 497 if (err)
497 return err; 498 return err;
498 499
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e764a2a2693c..f30f5f3fb5c1 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -16,6 +16,7 @@
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/phy/phy.h> 17#include <linux/phy/phy.h>
18 18
19#include "../pci.h"
19#include "pcie-iproc.h" 20#include "pcie-iproc.h"
20 21
21static const struct of_device_id iproc_pcie_of_match_table[] = { 22static const struct of_device_id iproc_pcie_of_match_table[] = {
@@ -99,8 +100,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
99 pcie->phy = NULL; 100 pcie->phy = NULL;
100 } 101 }
101 102
102 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources, 103 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
103 &iobase); 104 &iobase);
104 if (ret) { 105 if (ret) {
105 dev_err(dev, "unable to get PCI host bridge resources\n"); 106 dev_err(dev, "unable to get PCI host bridge resources\n");
106 return ret; 107 return ret;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index a8b20c5012a9..0baabe30858f 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -11,8 +11,10 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/iopoll.h> 12#include <linux/iopoll.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/irqchip/chained_irq.h>
14#include <linux/irqdomain.h> 15#include <linux/irqdomain.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/msi.h>
16#include <linux/of_address.h> 18#include <linux/of_address.h>
17#include <linux/of_pci.h> 19#include <linux/of_pci.h>
18#include <linux/of_platform.h> 20#include <linux/of_platform.h>
@@ -22,6 +24,8 @@
22#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
23#include <linux/reset.h> 25#include <linux/reset.h>
24 26
27#include "../pci.h"
28
25/* PCIe shared registers */ 29/* PCIe shared registers */
26#define PCIE_SYS_CFG 0x00 30#define PCIE_SYS_CFG 0x00
27#define PCIE_INT_ENABLE 0x0c 31#define PCIE_INT_ENABLE 0x0c
@@ -66,6 +70,10 @@
66 70
67/* PCIe V2 per-port registers */ 71/* PCIe V2 per-port registers */
68#define PCIE_MSI_VECTOR 0x0c0 72#define PCIE_MSI_VECTOR 0x0c0
73
74#define PCIE_CONF_VEND_ID 0x100
75#define PCIE_CONF_CLASS_ID 0x106
76
69#define PCIE_INT_MASK 0x420 77#define PCIE_INT_MASK 0x420
70#define INTX_MASK GENMASK(19, 16) 78#define INTX_MASK GENMASK(19, 16)
71#define INTX_SHIFT 16 79#define INTX_SHIFT 16
@@ -125,13 +133,13 @@ struct mtk_pcie_port;
125 133
126/** 134/**
127 * struct mtk_pcie_soc - differentiate between host generations 135 * struct mtk_pcie_soc - differentiate between host generations
128 * @has_msi: whether this host supports MSI interrupts or not 136 * @need_fix_class_id: whether this host's class ID needed to be fixed or not
129 * @ops: pointer to configuration access functions 137 * @ops: pointer to configuration access functions
130 * @startup: pointer to controller setting functions 138 * @startup: pointer to controller setting functions
131 * @setup_irq: pointer to initialize IRQ functions 139 * @setup_irq: pointer to initialize IRQ functions
132 */ 140 */
133struct mtk_pcie_soc { 141struct mtk_pcie_soc {
134 bool has_msi; 142 bool need_fix_class_id;
135 struct pci_ops *ops; 143 struct pci_ops *ops;
136 int (*startup)(struct mtk_pcie_port *port); 144 int (*startup)(struct mtk_pcie_port *port);
137 int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); 145 int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -155,7 +163,9 @@ struct mtk_pcie_soc {
155 * @lane: lane count 163 * @lane: lane count
156 * @slot: port slot 164 * @slot: port slot
157 * @irq_domain: legacy INTx IRQ domain 165 * @irq_domain: legacy INTx IRQ domain
166 * @inner_domain: inner IRQ domain
158 * @msi_domain: MSI IRQ domain 167 * @msi_domain: MSI IRQ domain
168 * @lock: protect the msi_irq_in_use bitmap
159 * @msi_irq_in_use: bit map for assigned MSI IRQ 169 * @msi_irq_in_use: bit map for assigned MSI IRQ
160 */ 170 */
161struct mtk_pcie_port { 171struct mtk_pcie_port {
@@ -173,7 +183,9 @@ struct mtk_pcie_port {
173 u32 lane; 183 u32 lane;
174 u32 slot; 184 u32 slot;
175 struct irq_domain *irq_domain; 185 struct irq_domain *irq_domain;
186 struct irq_domain *inner_domain;
176 struct irq_domain *msi_domain; 187 struct irq_domain *msi_domain;
188 struct mutex lock;
177 DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); 189 DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
178}; 190};
179 191
@@ -375,6 +387,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
375{ 387{
376 struct mtk_pcie *pcie = port->pcie; 388 struct mtk_pcie *pcie = port->pcie;
377 struct resource *mem = &pcie->mem; 389 struct resource *mem = &pcie->mem;
390 const struct mtk_pcie_soc *soc = port->pcie->soc;
378 u32 val; 391 u32 val;
379 size_t size; 392 size_t size;
380 int err; 393 int err;
@@ -403,6 +416,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
403 PCIE_MAC_SRSTB | PCIE_CRSTB; 416 PCIE_MAC_SRSTB | PCIE_CRSTB;
404 writel(val, port->base + PCIE_RST_CTRL); 417 writel(val, port->base + PCIE_RST_CTRL);
405 418
419 /* Set up vendor ID and class code */
420 if (soc->need_fix_class_id) {
421 val = PCI_VENDOR_ID_MEDIATEK;
422 writew(val, port->base + PCIE_CONF_VEND_ID);
423
424 val = PCI_CLASS_BRIDGE_HOST;
425 writew(val, port->base + PCIE_CONF_CLASS_ID);
426 }
427
406 /* 100ms timeout value should be enough for Gen1/2 training */ 428 /* 100ms timeout value should be enough for Gen1/2 training */
407 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, 429 err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
408 !!(val & PCIE_PORT_LINKUP_V2), 20, 430 !!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -430,103 +452,130 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
430 return 0; 452 return 0;
431} 453}
432 454
433static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port) 455static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
434{ 456{
435 int msi; 457 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
458 phys_addr_t addr;
436 459
437 msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); 460 /* MT2712/MT7622 only support 32-bit MSI addresses */
438 if (msi < MTK_MSI_IRQS_NUM) 461 addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
439 set_bit(msi, port->msi_irq_in_use); 462 msg->address_hi = 0;
440 else 463 msg->address_lo = lower_32_bits(addr);
441 return -ENOSPC; 464
465 msg->data = data->hwirq;
442 466
443 return msi; 467 dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
468 (int)data->hwirq, msg->address_hi, msg->address_lo);
444} 469}
445 470
446static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq) 471static int mtk_msi_set_affinity(struct irq_data *irq_data,
472 const struct cpumask *mask, bool force)
447{ 473{
448 clear_bit(hwirq, port->msi_irq_in_use); 474 return -EINVAL;
449} 475}
450 476
451static int mtk_pcie_msi_setup_irq(struct msi_controller *chip, 477static void mtk_msi_ack_irq(struct irq_data *data)
452 struct pci_dev *pdev, struct msi_desc *desc)
453{ 478{
454 struct mtk_pcie_port *port; 479 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
455 struct msi_msg msg; 480 u32 hwirq = data->hwirq;
456 unsigned int irq;
457 int hwirq;
458 phys_addr_t msg_addr;
459 481
460 port = mtk_pcie_find_port(pdev->bus, pdev->devfn); 482 writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
461 if (!port) 483}
462 return -EINVAL;
463 484
464 hwirq = mtk_pcie_msi_alloc(port); 485static struct irq_chip mtk_msi_bottom_irq_chip = {
465 if (hwirq < 0) 486 .name = "MTK MSI",
466 return hwirq; 487 .irq_compose_msi_msg = mtk_compose_msi_msg,
488 .irq_set_affinity = mtk_msi_set_affinity,
489 .irq_ack = mtk_msi_ack_irq,
490};
467 491
468 irq = irq_create_mapping(port->msi_domain, hwirq); 492static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
469 if (!irq) { 493 unsigned int nr_irqs, void *args)
470 mtk_pcie_msi_free(port, hwirq); 494{
471 return -EINVAL; 495 struct mtk_pcie_port *port = domain->host_data;
472 } 496 unsigned long bit;
497
498 WARN_ON(nr_irqs != 1);
499 mutex_lock(&port->lock);
473 500
474 chip->dev = &pdev->dev; 501 bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
502 if (bit >= MTK_MSI_IRQS_NUM) {
503 mutex_unlock(&port->lock);
504 return -ENOSPC;
505 }
475 506
476 irq_set_msi_desc(irq, desc); 507 __set_bit(bit, port->msi_irq_in_use);
477 508
478 /* MT2712/MT7622 only support 32-bit MSI addresses */ 509 mutex_unlock(&port->lock);
479 msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
480 msg.address_hi = 0;
481 msg.address_lo = lower_32_bits(msg_addr);
482 msg.data = hwirq;
483 510
484 pci_write_msi_msg(irq, &msg); 511 irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
512 domain->host_data, handle_edge_irq,
513 NULL, NULL);
485 514
486 return 0; 515 return 0;
487} 516}
488 517
489static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) 518static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
519 unsigned int virq, unsigned int nr_irqs)
490{ 520{
491 struct pci_dev *pdev = to_pci_dev(chip->dev); 521 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
492 struct irq_data *d = irq_get_irq_data(irq); 522 struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
493 irq_hw_number_t hwirq = irqd_to_hwirq(d);
494 struct mtk_pcie_port *port;
495 523
496 port = mtk_pcie_find_port(pdev->bus, pdev->devfn); 524 mutex_lock(&port->lock);
497 if (!port)
498 return;
499 525
500 irq_dispose_mapping(irq); 526 if (!test_bit(d->hwirq, port->msi_irq_in_use))
501 mtk_pcie_msi_free(port, hwirq); 527 dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
528 d->hwirq);
529 else
530 __clear_bit(d->hwirq, port->msi_irq_in_use);
531
532 mutex_unlock(&port->lock);
533
534 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
502} 535}
503 536
504static struct msi_controller mtk_pcie_msi_chip = { 537static const struct irq_domain_ops msi_domain_ops = {
505 .setup_irq = mtk_pcie_msi_setup_irq, 538 .alloc = mtk_pcie_irq_domain_alloc,
506 .teardown_irq = mtk_msi_teardown_irq, 539 .free = mtk_pcie_irq_domain_free,
507}; 540};
508 541
509static struct irq_chip mtk_msi_irq_chip = { 542static struct irq_chip mtk_msi_irq_chip = {
510 .name = "MTK PCIe MSI", 543 .name = "MTK PCIe MSI",
511 .irq_enable = pci_msi_unmask_irq, 544 .irq_ack = irq_chip_ack_parent,
512 .irq_disable = pci_msi_mask_irq, 545 .irq_mask = pci_msi_mask_irq,
513 .irq_mask = pci_msi_mask_irq, 546 .irq_unmask = pci_msi_unmask_irq,
514 .irq_unmask = pci_msi_unmask_irq,
515}; 547};
516 548
517static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq, 549static struct msi_domain_info mtk_msi_domain_info = {
518 irq_hw_number_t hwirq) 550 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
551 MSI_FLAG_PCI_MSIX),
552 .chip = &mtk_msi_irq_chip,
553};
554
555static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
519{ 556{
520 irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq); 557 struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
521 irq_set_chip_data(irq, domain->host_data); 558
559 mutex_init(&port->lock);
560
561 port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
562 &msi_domain_ops, port);
563 if (!port->inner_domain) {
564 dev_err(port->pcie->dev, "failed to create IRQ domain\n");
565 return -ENOMEM;
566 }
567
568 port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
569 port->inner_domain);
570 if (!port->msi_domain) {
571 dev_err(port->pcie->dev, "failed to create MSI domain\n");
572 irq_domain_remove(port->inner_domain);
573 return -ENOMEM;
574 }
522 575
523 return 0; 576 return 0;
524} 577}
525 578
526static const struct irq_domain_ops msi_domain_ops = {
527 .map = mtk_pcie_msi_map,
528};
529
530static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) 579static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
531{ 580{
532 u32 val; 581 u32 val;
@@ -559,6 +608,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
559{ 608{
560 struct device *dev = port->pcie->dev; 609 struct device *dev = port->pcie->dev;
561 struct device_node *pcie_intc_node; 610 struct device_node *pcie_intc_node;
611 int ret;
562 612
563 /* Setup INTx */ 613 /* Setup INTx */
564 pcie_intc_node = of_get_next_child(node, NULL); 614 pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,27 +625,28 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
575 } 625 }
576 626
577 if (IS_ENABLED(CONFIG_PCI_MSI)) { 627 if (IS_ENABLED(CONFIG_PCI_MSI)) {
578 port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM, 628 ret = mtk_pcie_allocate_msi_domains(port);
579 &msi_domain_ops, 629 if (ret)
580 &mtk_pcie_msi_chip); 630 return ret;
581 if (!port->msi_domain) { 631
582 dev_err(dev, "failed to create MSI IRQ domain\n");
583 return -ENODEV;
584 }
585 mtk_pcie_enable_msi(port); 632 mtk_pcie_enable_msi(port);
586 } 633 }
587 634
588 return 0; 635 return 0;
589} 636}
590 637
591static irqreturn_t mtk_pcie_intr_handler(int irq, void *data) 638static void mtk_pcie_intr_handler(struct irq_desc *desc)
592{ 639{
593 struct mtk_pcie_port *port = (struct mtk_pcie_port *)data; 640 struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
641 struct irq_chip *irqchip = irq_desc_get_chip(desc);
594 unsigned long status; 642 unsigned long status;
595 u32 virq; 643 u32 virq;
596 u32 bit = INTX_SHIFT; 644 u32 bit = INTX_SHIFT;
597 645
598 while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) { 646 chained_irq_enter(irqchip, desc);
647
648 status = readl(port->base + PCIE_INT_STATUS);
649 if (status & INTX_MASK) {
599 for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { 650 for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
600 /* Clear the INTx */ 651 /* Clear the INTx */
601 writel(1 << bit, port->base + PCIE_INT_STATUS); 652 writel(1 << bit, port->base + PCIE_INT_STATUS);
@@ -606,14 +657,12 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
606 } 657 }
607 658
608 if (IS_ENABLED(CONFIG_PCI_MSI)) { 659 if (IS_ENABLED(CONFIG_PCI_MSI)) {
609 while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) { 660 if (status & MSI_STATUS){
610 unsigned long imsi_status; 661 unsigned long imsi_status;
611 662
612 while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { 663 while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
613 for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) { 664 for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
614 /* Clear the MSI */ 665 virq = irq_find_mapping(port->inner_domain, bit);
615 writel(1 << bit, port->base + PCIE_IMSI_STATUS);
616 virq = irq_find_mapping(port->msi_domain, bit);
617 generic_handle_irq(virq); 666 generic_handle_irq(virq);
618 } 667 }
619 } 668 }
@@ -622,7 +671,9 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
622 } 671 }
623 } 672 }
624 673
625 return IRQ_HANDLED; 674 chained_irq_exit(irqchip, desc);
675
676 return;
626} 677}
627 678
628static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, 679static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -633,20 +684,15 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
633 struct platform_device *pdev = to_platform_device(dev); 684 struct platform_device *pdev = to_platform_device(dev);
634 int err, irq; 685 int err, irq;
635 686
636 irq = platform_get_irq(pdev, port->slot);
637 err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
638 IRQF_SHARED, "mtk-pcie", port);
639 if (err) {
640 dev_err(dev, "unable to request IRQ %d\n", irq);
641 return err;
642 }
643
644 err = mtk_pcie_init_irq_domain(port, node); 687 err = mtk_pcie_init_irq_domain(port, node);
645 if (err) { 688 if (err) {
646 dev_err(dev, "failed to init PCIe IRQ domain\n"); 689 dev_err(dev, "failed to init PCIe IRQ domain\n");
647 return err; 690 return err;
648 } 691 }
649 692
693 irq = platform_get_irq(pdev, port->slot);
694 irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
695
650 return 0; 696 return 0;
651} 697}
652 698
@@ -1080,8 +1126,6 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
1080 host->map_irq = of_irq_parse_and_map_pci; 1126 host->map_irq = of_irq_parse_and_map_pci;
1081 host->swizzle_irq = pci_common_swizzle; 1127 host->swizzle_irq = pci_common_swizzle;
1082 host->sysdata = pcie; 1128 host->sysdata = pcie;
1083 if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
1084 host->msi = &mtk_pcie_msi_chip;
1085 1129
1086 err = pci_scan_root_bus_bridge(host); 1130 err = pci_scan_root_bus_bridge(host);
1087 if (err < 0) 1131 if (err < 0)
@@ -1142,8 +1186,14 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
1142 .startup = mtk_pcie_startup_port, 1186 .startup = mtk_pcie_startup_port,
1143}; 1187};
1144 1188
1145static const struct mtk_pcie_soc mtk_pcie_soc_v2 = { 1189static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
1146 .has_msi = true, 1190 .ops = &mtk_pcie_ops_v2,
1191 .startup = mtk_pcie_startup_port_v2,
1192 .setup_irq = mtk_pcie_setup_irq,
1193};
1194
1195static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
1196 .need_fix_class_id = true,
1147 .ops = &mtk_pcie_ops_v2, 1197 .ops = &mtk_pcie_ops_v2,
1148 .startup = mtk_pcie_startup_port_v2, 1198 .startup = mtk_pcie_startup_port_v2,
1149 .setup_irq = mtk_pcie_setup_irq, 1199 .setup_irq = mtk_pcie_setup_irq,
@@ -1152,8 +1202,8 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
1152static const struct of_device_id mtk_pcie_ids[] = { 1202static const struct of_device_id mtk_pcie_ids[] = {
1153 { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, 1203 { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
1154 { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, 1204 { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
1155 { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 }, 1205 { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
1156 { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 }, 1206 { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
1157 {}, 1207 {},
1158}; 1208};
1159 1209
diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c
new file mode 100644
index 000000000000..4d6c20e47bed
--- /dev/null
+++ b/drivers/pci/host/pcie-mobiveil.c
@@ -0,0 +1,866 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe host controller driver for Mobiveil PCIe Host controller
4 *
5 * Copyright (c) 2018 Mobiveil Inc.
6 * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
7 */
8
9#include <linux/delay.h>
10#include <linux/init.h>
11#include <linux/interrupt.h>
12#include <linux/irq.h>
13#include <linux/irqchip/chained_irq.h>
14#include <linux/irqdomain.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/msi.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20#include <linux/of_platform.h>
21#include <linux/of_pci.h>
22#include <linux/pci.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25
26/* register offsets and bit positions */
27
28/*
29 * translation tables are grouped into windows, each window registers are
30 * grouped into blocks of 4 or 16 registers each
31 */
32#define PAB_REG_BLOCK_SIZE 16
33#define PAB_EXT_REG_BLOCK_SIZE 4
34
35#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
36#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
37
38#define LTSSM_STATUS 0x0404
39#define LTSSM_STATUS_L0_MASK 0x3f
40#define LTSSM_STATUS_L0 0x2d
41
42#define PAB_CTRL 0x0808
43#define AMBA_PIO_ENABLE_SHIFT 0
44#define PEX_PIO_ENABLE_SHIFT 1
45#define PAGE_SEL_SHIFT 13
46#define PAGE_SEL_MASK 0x3f
47#define PAGE_LO_MASK 0x3ff
48#define PAGE_SEL_EN 0xc00
49#define PAGE_SEL_OFFSET_SHIFT 10
50
51#define PAB_AXI_PIO_CTRL 0x0840
52#define APIO_EN_MASK 0xf
53
54#define PAB_PEX_PIO_CTRL 0x08c0
55#define PIO_ENABLE_SHIFT 0
56
57#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
58#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
59#define PAB_INTP_INTX_MASK 0x01e0
60#define PAB_INTP_MSI_MASK 0x8
61
62#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
63#define WIN_ENABLE_SHIFT 0
64#define WIN_TYPE_SHIFT 1
65
66#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
67
68#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
69#define AXI_WINDOW_ALIGN_MASK 3
70
71#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
72#define PAB_BUS_SHIFT 24
73#define PAB_DEVICE_SHIFT 19
74#define PAB_FUNCTION_SHIFT 16
75
76#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
77#define PAB_INTP_AXI_PIO_CLASS 0x474
78
79#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
80#define AMAP_CTRL_EN_SHIFT 0
81#define AMAP_CTRL_TYPE_SHIFT 1
82
83#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
84#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
85#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
86#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
87
88/* starting offset of INTX bits in status register */
89#define PAB_INTX_START 5
90
91/* supported number of MSI interrupts */
92#define PCI_NUM_MSI 16
93
94/* MSI registers */
95#define MSI_BASE_LO_OFFSET 0x04
96#define MSI_BASE_HI_OFFSET 0x08
97#define MSI_SIZE_OFFSET 0x0c
98#define MSI_ENABLE_OFFSET 0x14
99#define MSI_STATUS_OFFSET 0x18
100#define MSI_DATA_OFFSET 0x20
101#define MSI_ADDR_L_OFFSET 0x24
102#define MSI_ADDR_H_OFFSET 0x28
103
/* outbound and inbound window definitions */
#define WIN_NUM_0		0
#define WIN_NUM_1		1
#define CFG_WINDOW_TYPE		0
#define IO_WINDOW_TYPE		1
#define MEM_WINDOW_TYPE		2
/*
 * 256 GB inbound window. The literal must be a 64-bit constant:
 * 256 * 1024 * 1024 * 1024 == 2^38 overflows a 32-bit int.
 */
#define IB_WIN_SIZE		(256ULL * 1024 * 1024 * 1024)
#define MAX_PIO_WINDOWS		8
112
113/* Parameters for the waiting for link up routine */
114#define LINK_WAIT_MAX_RETRIES 10
115#define LINK_WAIT_MIN 90000
116#define LINK_WAIT_MAX 100000
117
/* Per-controller MSI state: domains, doorbell address and vector bitmap. */
struct mobiveil_msi {			/* MSI information */
	struct mutex lock;		/* protect bitmap variable */
	struct irq_domain *msi_domain;	/* PCI MSI domain (top level) */
	struct irq_domain *dev_domain;	/* inner domain mapping hwirq -> virq */
	phys_addr_t msi_pages_phys;	/* physical MSI doorbell address */
	int num_of_vectors;		/* set to PCI_NUM_MSI at init */
	DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
};
126
/* Driver-private state for one Mobiveil PCIe host controller instance. */
struct mobiveil_pcie {
	struct platform_device *pdev;
	struct list_head resources;	/* host bridge windows from DT */
	void __iomem *config_axi_slave_base;	/* endpoint config base */
	void __iomem *csr_axi_slave_base;	/* root port config base */
	void __iomem *apb_csr_base;	/* MSI register base */
	/*
	 * NOTE(review): declared __iomem but assigned res->start (a physical
	 * address) in mobiveil_pcie_parse_dt() and consumed as phys_addr_t in
	 * mobiveil_pcie_enable_msi() — should probably be phys_addr_t; confirm.
	 */
	void __iomem *pcie_reg_base;	/* Physical PCIe Controller Base */
	struct irq_domain *intx_domain;
	raw_spinlock_t intx_mask_lock;	/* serializes PAB_INTP_AMBA_MISC_ENB RMW */
	int irq;			/* combined INTx/MSI interrupt line */
	int apio_wins;			/* max outbound (AXI PIO) windows */
	int ppio_wins;			/* max inbound (PEX PIO) windows */
	int ob_wins_configured;		/* configured outbound windows */
	int ib_wins_configured;		/* configured inbound windows */
	struct resource *ob_io_res;	/* config-space outbound window resource */
	char root_bus_nr;		/* root bus number for config routing */
	struct mobiveil_msi msi;
};
145
146static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
147 const u32 reg)
148{
149 writel_relaxed(value, pcie->csr_axi_slave_base + reg);
150}
151
152static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
153{
154 return readl_relaxed(pcie->csr_axi_slave_base + reg);
155}
156
157static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
158{
159 return (csr_readl(pcie, LTSSM_STATUS) &
160 LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
161}
162
163static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
164{
165 struct mobiveil_pcie *pcie = bus->sysdata;
166
167 /* Only one device down on each root port */
168 if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
169 return false;
170
171 /*
172 * Do not read more than one device on the bus directly
173 * attached to RC
174 */
175 if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
176 return false;
177
178 return true;
179}
180
/*
 * mobiveil_pcie_map_bus - routine to get the configuration base of either
 * root port or endpoint
 *
 * Returns a pointer into RC CSR space for root-bus accesses, or into the
 * Config/APIO window (after programming the target BDF) for EP accesses.
 * Returns NULL for devfn values the topology cannot reach.
 */
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn, int where)
{
	struct mobiveil_pcie *pcie = bus->sysdata;

	/* only devfn 0 is reachable on the root bus / first EP bus */
	if (!mobiveil_pcie_valid_device(bus, devfn))
		return NULL;

	if (bus->number == pcie->root_bus_nr) {
		/* RC config access */
		return pcie->csr_axi_slave_base + where;
	}

	/*
	 * EP config access (in Config/APIO space)
	 * Program PEX Address base (31..16 bits) with appropriate value
	 * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
	 * Relies on pci_lock serialization
	 * (the window write below must not race the subsequent access).
	 */
	csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
			PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
			PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
			PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
	return pcie->config_axi_slave_base + where;
}
210
/* Generic config accessors on top of map_bus (which handles BDF routing). */
static struct pci_ops mobiveil_pcie_ops = {
	.map_bus = mobiveil_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
216
/*
 * Chained handler for the single combined INTx/MSI interrupt line.
 * Demultiplexes INTx via PAB_INTP_AMBA_MISC_STAT and MSIs via the
 * APB MSI FIFO, then dispatches to the mapped virqs.
 */
static void mobiveil_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
	struct device *dev = &pcie->pdev->dev;
	struct mobiveil_msi *msi = &pcie->msi;
	u32 msi_data, msi_addr_lo, msi_addr_hi;
	u32 intr_status, msi_status;
	unsigned long shifted_status;
	u32 bit, virq, val, mask;

	/*
	 * The core provides a single interrupt for both INTx/MSI messages.
	 * So we'll read both INTx and MSI status
	 */

	chained_irq_enter(chip, desc);

	/* read INTx status, masked by the currently enabled sources */
	val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
	mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	intr_status = val & mask;

	/* Handle INTx */
	if (intr_status & PAB_INTP_INTX_MASK) {
		/* INTA..INTD live at bits PAB_INTX_START.. in the status reg */
		shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
				PAB_INTX_START;
		/*
		 * NOTE(review): shifted_status is never re-read inside this
		 * loop, so the while condition below is loop-invariant; it is
		 * normally false after one pass (bits above PAB_INTX_START of
		 * the shifted value), but would spin forever if ever true —
		 * verify against later upstream INTx rework.
		 */
		do {
			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
				/* hwirq for INTx is 1-based, hence bit + 1 */
				virq = irq_find_mapping(pcie->intx_domain,
						bit + 1);
				if (virq)
					generic_handle_irq(virq);
				else
					dev_err_ratelimited(dev,
						"unexpected IRQ, INT%d\n", bit);

				/* clear interrupt */
				csr_writel(pcie,
					shifted_status << PAB_INTX_START,
					PAB_INTP_AMBA_MISC_STAT);
			}
		} while ((shifted_status >> PAB_INTX_START) != 0);
	}

	/* read extra MSI status register */
	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);

	/* handle MSI interrupts: pop vectors from the HW FIFO until empty */
	while (msi_status & 1) {
		msi_data = readl_relaxed(pcie->apb_csr_base
				+ MSI_DATA_OFFSET);

		/*
		 * MSI_STATUS_OFFSET register gets updated to zero
		 * once we pop not only the MSI data but also address
		 * from MSI hardware FIFO. So keeping these following
		 * two dummy reads.
		 */
		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
				MSI_ADDR_L_OFFSET);
		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
				MSI_ADDR_H_OFFSET);
		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
				msi_data, msi_addr_hi, msi_addr_lo);

		/* msi_data is the hwirq number programmed into the EP */
		virq = irq_find_mapping(msi->dev_domain, msi_data);
		if (virq)
			generic_handle_irq(virq);

		msi_status = readl_relaxed(pcie->apb_csr_base +
				MSI_STATUS_OFFSET);
	}

	/* Clear the interrupt status */
	csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
	chained_irq_exit(chip, desc);
}
295
/*
 * Parse DT properties and map the three register regions
 * ("config_axi_slave", "csr_axi_slave", "apb_csr"), read the window
 * counts, and install the chained interrupt handler.
 *
 * Returns 0 on success or a negative errno.
 */
static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct platform_device *pdev = pcie->pdev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	const char *type;

	/* sanity check: the node must be device_type = "pci" */
	type = of_get_property(node, "device_type", NULL);
	if (!type || strcmp(type, "pci")) {
		dev_err(dev, "invalid \"device_type\" %s\n", type);
		return -EINVAL;
	}

	/* map config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"config_axi_slave");
	pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->config_axi_slave_base))
		return PTR_ERR(pcie->config_axi_slave_base);
	/* keep the raw resource: host init sizes the config OB window from it */
	pcie->ob_io_res = res;

	/* map csr resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"csr_axi_slave");
	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->csr_axi_slave_base))
		return PTR_ERR(pcie->csr_axi_slave_base);
	/*
	 * NOTE(review): res->start is a physical address being stored in a
	 * void __iomem * field (used later as the MSI doorbell phys addr) —
	 * the field type looks wrong; confirm and make it phys_addr_t.
	 */
	pcie->pcie_reg_base = res->start;

	/* map MSI config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->apb_csr_base))
		return PTR_ERR(pcie->apb_csr_base);

	/* read the number of windows requested, defaulting to the HW max */
	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
		pcie->apio_wins = MAX_PIO_WINDOWS;

	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
		pcie->ppio_wins = MAX_PIO_WINDOWS;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq <= 0) {
		/* NOTE(review): the platform_get_irq() errno is discarded */
		dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
		return -ENODEV;
	}

	/* single combined INTx/MSI line, demuxed in mobiveil_pcie_isr() */
	irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);

	return 0;
}
349
350/*
351 * select_paged_register - routine to access paged register of root complex
352 *
353 * registers of RC are paged, for this scheme to work
354 * extracted higher 6 bits of the offset will be written to pg_sel
355 * field of PAB_CTRL register and rest of the lower 10 bits enabled with
356 * PAGE_SEL_EN are used as offset of the register.
357 */
358static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
359{
360 int pab_ctrl_dw, pg_sel;
361
362 /* clear pg_sel field */
363 pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
364 pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
365
366 /* set pg_sel field */
367 pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
368 pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
369 csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
370}
371
372static void write_paged_register(struct mobiveil_pcie *pcie,
373 u32 val, u32 offset)
374{
375 u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
376
377 select_paged_register(pcie, offset);
378 csr_writel(pcie, val, off);
379}
380
381static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
382{
383 u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
384
385 select_paged_register(pcie, offset);
386 return csr_readl(pcie, off);
387}
388
389static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
390 int pci_addr, u32 type, u64 size)
391{
392 int pio_ctrl_val;
393 int amap_ctrl_dw;
394 u64 size64 = ~(size - 1);
395
396 if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
397 dev_err(&pcie->pdev->dev,
398 "ERROR: max inbound windows reached !\n");
399 return;
400 }
401
402 pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
403 csr_writel(pcie,
404 pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
405 amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
406 amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
407 amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
408
409 write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
410 PAB_PEX_AMAP_CTRL(win_num));
411
412 write_paged_register(pcie, upper_32_bits(size64),
413 PAB_EXT_PEX_AMAP_SIZEN(win_num));
414
415 write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
416 write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
417 write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
418}
419
/*
 * routine to program the outbound windows
 *
 * Programs AXI->PEX window @win_num: enable + type + size in
 * PAB_AXI_AMAP_CTRL, AXI base and the 64-bit PCI base, then bumps the
 * configured-window count used by the apio_wins guard.
 */
static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
		u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
{

	u32 value, type;
	u64 size64 = ~(size - 1);

	if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
		dev_err(&pcie->pdev->dev,
			"ERROR: max outbound windows reached !\n");
		return;
	}

	/*
	 * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
	 * to 4 KB in PAB_AXI_AMAP_CTRL register
	 */
	type = config_io_bit;
	/*
	 * NOTE(review): this read (and the two below) stores into `value`
	 * which is never used — possibly intended as posting reads before
	 * the write, possibly dead code; confirm before removing.
	 */
	value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
	csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
			lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));

	write_paged_register(pcie, upper_32_bits(size64),
				PAB_EXT_AXI_AMAP_SIZE(win_num));

	/*
	 * program AXI window base with appropriate value in
	 * PAB_AXI_AMAP_AXI_WIN0 register
	 */
	value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
	csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
			PAB_AXI_AMAP_AXI_WIN(win_num));

	value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));

	csr_writel(pcie, lower_32_bits(pci_addr),
			PAB_AXI_AMAP_PEX_WIN_L(win_num));
	csr_writel(pcie, upper_32_bits(pci_addr),
			PAB_AXI_AMAP_PEX_WIN_H(win_num));

	pcie->ob_wins_configured++;
}
465
466static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
467{
468 int retries;
469
470 /* check if the link is up or not */
471 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
472 if (mobiveil_pcie_link_up(pcie))
473 return 0;
474
475 usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
476 }
477 dev_err(&pcie->pdev->dev, "link never came up\n");
478 return -ETIMEDOUT;
479}
480
/*
 * Program the MSI capture window: the controller's own register base is
 * used as the doorbell address EPs write their MSI data to.
 */
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
	/*
	 * NOTE(review): pcie_reg_base is declared void __iomem * but holds a
	 * physical address (set from res->start in parse_dt) — the implicit
	 * conversion to phys_addr_t here suggests the field type is wrong.
	 */
	phys_addr_t msg_addr = pcie->pcie_reg_base;
	struct mobiveil_msi *msi = &pcie->msi;

	pcie->msi.num_of_vectors = PCI_NUM_MSI;
	msi->msi_pages_phys = (phys_addr_t)msg_addr;

	/* doorbell base (lo/hi), 4 KB window, then enable MSI capture */
	writel_relaxed(lower_32_bits(msg_addr),
		pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
	writel_relaxed(upper_32_bits(msg_addr),
		pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
496
/*
 * Bring up the host controller: wait for link, enable bus mastering and
 * PIO paths, unmask INTx/MSI sources, program the config outbound window,
 * a default inbound window, per-"ranges" outbound windows, and finally
 * the MSI capture registers. Returns 0 on success or a negative errno
 * from the link bring-up.
 */
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
	u32 value, pab_ctrl, type = 0;
	int err;
	struct resource_entry *win, *tmp;

	err = mobiveil_bringup_link(pcie);
	if (err) {
		dev_info(&pcie->pdev->dev, "link bring-up failed\n");
		return err;
	}

	/*
	 * program Bus Master Enable Bit in Command Register in PAB Config
	 * Space
	 */
	value = csr_readl(pcie, PCI_COMMAND);
	csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER, PCI_COMMAND);

	/*
	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
	 * register
	 */
	pab_ctrl = csr_readl(pcie, PAB_CTRL);
	csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
		(1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);

	/* unmask both INTx and MSI at the top-level interrupt controller */
	csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
		PAB_INTP_AMBA_MISC_ENB);

	/*
	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
	 * PAB_AXI_PIO_CTRL Register
	 */
	value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
	csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);

	/*
	 * we'll program one outbound window for config reads and
	 * another default inbound window for all the upstream traffic
	 * rest of the outbound windows will be configured according to
	 * the "ranges" field defined in device tree
	 */

	/* config outbound translation window */
	program_ob_windows(pcie, pcie->ob_wins_configured,
			pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
			resource_size(pcie->ob_io_res));

	/* memory inbound translation window */
	program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
		type = 0;
		if (resource_type(win->res) == IORESOURCE_MEM)
			type = MEM_WINDOW_TYPE;
		if (resource_type(win->res) == IORESOURCE_IO)
			type = IO_WINDOW_TYPE;
		if (type) {
			/* configure outbound translation window */
			program_ob_windows(pcie, pcie->ob_wins_configured,
				win->res->start, 0, type,
				resource_size(win->res));
		}
	}

	/* setup MSI hardware registers */
	mobiveil_pcie_enable_msi(pcie);

	/* err is 0 here: link bring-up succeeded above */
	return err;
}
570
571static void mobiveil_mask_intx_irq(struct irq_data *data)
572{
573 struct irq_desc *desc = irq_to_desc(data->irq);
574 struct mobiveil_pcie *pcie;
575 unsigned long flags;
576 u32 mask, shifted_val;
577
578 pcie = irq_desc_get_chip_data(desc);
579 mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
580 raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
581 shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
582 csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
583 raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
584}
585
586static void mobiveil_unmask_intx_irq(struct irq_data *data)
587{
588 struct irq_desc *desc = irq_to_desc(data->irq);
589 struct mobiveil_pcie *pcie;
590 unsigned long flags;
591 u32 shifted_val, mask;
592
593 pcie = irq_desc_get_chip_data(desc);
594 mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
595 raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
596 shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
597 csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
598 raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
599}
600
/*
 * irq_chip for the legacy INTx interrupts. Enable/disable reuse the
 * mask/unmask handlers, so all four callbacks funnel through the same
 * PAB_INTP_AMBA_MISC_ENB read-modify-write helpers above.
 */
static struct irq_chip intx_irq_chip = {
	.name = "mobiveil_pcie:intx",
	.irq_enable = mobiveil_unmask_intx_irq,
	.irq_disable = mobiveil_mask_intx_irq,
	.irq_mask = mobiveil_mask_intx_irq,
	.irq_unmask = mobiveil_unmask_intx_irq,
};
608
609/* routine to setup the INTx related data */
610static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
611 irq_hw_number_t hwirq)
612{
613 irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
614 irq_set_chip_data(irq, domain->host_data);
615 return 0;
616}
617
/*
 * INTx domain operations: only .map is needed; unmapping uses the
 * generic default behaviour.
 */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mobiveil_pcie_intx_map,
};
622
/*
 * Top-level MSI irq_chip used by the PCI/MSI domain; masking is done
 * at the PCI device level via the generic pci_msi_{mask,unmask}_irq
 * helpers.
 */
static struct irq_chip mobiveil_msi_irq_chip = {
	.name = "Mobiveil PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
628
629static struct msi_domain_info mobiveil_msi_domain_info = {
630 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
631 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
632 .chip = &mobiveil_msi_irq_chip,
633};
634
635static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
636{
637 struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
638 phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
639
640 msg->address_lo = lower_32_bits(addr);
641 msg->address_hi = upper_32_bits(addr);
642 msg->data = data->hwirq;
643
644 dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
645 (int)data->hwirq, msg->address_hi, msg->address_lo);
646}
647
/*
 * Affinity changes are not supported for the bottom-level MSI chip;
 * always reject the request.
 */
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
				     const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
653
/*
 * Bottom-level (controller) MSI irq_chip: composes the doorbell
 * message and rejects affinity changes.
 */
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
	.name = "Mobiveil MSI",
	.irq_compose_msi_msg = mobiveil_compose_msi_msg,
	.irq_set_affinity = mobiveil_msi_set_affinity,
};
659
660static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
661 unsigned int virq, unsigned int nr_irqs, void *args)
662{
663 struct mobiveil_pcie *pcie = domain->host_data;
664 struct mobiveil_msi *msi = &pcie->msi;
665 unsigned long bit;
666
667 WARN_ON(nr_irqs != 1);
668 mutex_lock(&msi->lock);
669
670 bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
671 if (bit >= msi->num_of_vectors) {
672 mutex_unlock(&msi->lock);
673 return -ENOSPC;
674 }
675
676 set_bit(bit, msi->msi_irq_in_use);
677
678 mutex_unlock(&msi->lock);
679
680 irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
681 domain->host_data, handle_level_irq,
682 NULL, NULL);
683 return 0;
684}
685
686static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
687 unsigned int virq, unsigned int nr_irqs)
688{
689 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
690 struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
691 struct mobiveil_msi *msi = &pcie->msi;
692
693 mutex_lock(&msi->lock);
694
695 if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
696 dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
697 d->hwirq);
698 } else {
699 __clear_bit(d->hwirq, msi->msi_irq_in_use);
700 }
701
702 mutex_unlock(&msi->lock);
703}
/* Low-level MSI domain operations (vector bitmap alloc/free). */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mobiveil_irq_msi_domain_alloc,
	.free	= mobiveil_irq_msi_domain_free,
};
708
709static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
710{
711 struct device *dev = &pcie->pdev->dev;
712 struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
713 struct mobiveil_msi *msi = &pcie->msi;
714
715 mutex_init(&pcie->msi.lock);
716 msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
717 &msi_domain_ops, pcie);
718 if (!msi->dev_domain) {
719 dev_err(dev, "failed to create IRQ domain\n");
720 return -ENOMEM;
721 }
722
723 msi->msi_domain = pci_msi_create_irq_domain(fwnode,
724 &mobiveil_msi_domain_info, msi->dev_domain);
725 if (!msi->msi_domain) {
726 dev_err(dev, "failed to create MSI domain\n");
727 irq_domain_remove(msi->dev_domain);
728 return -ENOMEM;
729 }
730 return 0;
731}
732
733static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
734{
735 struct device *dev = &pcie->pdev->dev;
736 struct device_node *node = dev->of_node;
737 int ret;
738
739 /* setup INTx */
740 pcie->intx_domain = irq_domain_add_linear(node,
741 PCI_NUM_INTX, &intx_domain_ops, pcie);
742
743 if (!pcie->intx_domain) {
744 dev_err(dev, "Failed to get a INTx IRQ domain\n");
745 return -ENODEV;
746 }
747
748 raw_spin_lock_init(&pcie->intx_mask_lock);
749
750 /* setup MSI */
751 ret = mobiveil_allocate_msi_domains(pcie);
752 if (ret)
753 return ret;
754
755 return 0;
756}
757
758static int mobiveil_pcie_probe(struct platform_device *pdev)
759{
760 struct mobiveil_pcie *pcie;
761 struct pci_bus *bus;
762 struct pci_bus *child;
763 struct pci_host_bridge *bridge;
764 struct device *dev = &pdev->dev;
765 resource_size_t iobase;
766 int ret;
767
768 /* allocate the PCIe port */
769 bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
770 if (!bridge)
771 return -ENODEV;
772
773 pcie = pci_host_bridge_priv(bridge);
774 if (!pcie)
775 return -ENOMEM;
776
777 pcie->pdev = pdev;
778
779 ret = mobiveil_pcie_parse_dt(pcie);
780 if (ret) {
781 dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
782 return ret;
783 }
784
785 INIT_LIST_HEAD(&pcie->resources);
786
787 /* parse the host bridge base addresses from the device tree file */
788 ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
789 &pcie->resources, &iobase);
790 if (ret) {
791 dev_err(dev, "Getting bridge resources failed\n");
792 return -ENOMEM;
793 }
794
795 /*
796 * configure all inbound and outbound windows and prepare the RC for
797 * config access
798 */
799 ret = mobiveil_host_init(pcie);
800 if (ret) {
801 dev_err(dev, "Failed to initialize host\n");
802 goto error;
803 }
804
805 /* fixup for PCIe class register */
806 csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
807
808 /* initialize the IRQ domains */
809 ret = mobiveil_pcie_init_irq_domain(pcie);
810 if (ret) {
811 dev_err(dev, "Failed creating IRQ Domain\n");
812 goto error;
813 }
814
815 ret = devm_request_pci_bus_resources(dev, &pcie->resources);
816 if (ret)
817 goto error;
818
819 /* Initialize bridge */
820 list_splice_init(&pcie->resources, &bridge->windows);
821 bridge->dev.parent = dev;
822 bridge->sysdata = pcie;
823 bridge->busnr = pcie->root_bus_nr;
824 bridge->ops = &mobiveil_pcie_ops;
825 bridge->map_irq = of_irq_parse_and_map_pci;
826 bridge->swizzle_irq = pci_common_swizzle;
827
828 /* setup the kernel resources for the newly added PCIe root bus */
829 ret = pci_scan_root_bus_bridge(bridge);
830 if (ret)
831 goto error;
832
833 bus = bridge->bus;
834
835 pci_assign_unassigned_bus_resources(bus);
836 list_for_each_entry(child, &bus->children, node)
837 pcie_bus_configure_settings(child);
838 pci_bus_add_devices(bus);
839
840 return 0;
841error:
842 pci_free_resource_list(&pcie->resources);
843 return ret;
844}
845
/* Device-tree compatible strings handled by this driver. */
static const struct of_device_id mobiveil_pcie_of_match[] = {
	{.compatible = "mbvl,gpex40-pcie",},
	{},
};

MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
852
/*
 * Platform driver registration. No .remove is provided and
 * suppress_bind_attrs is set, so the device cannot be unbound via
 * sysfs; the driver is registered as builtin-only.
 */
static struct platform_driver mobiveil_pcie_driver = {
	.probe = mobiveil_pcie_probe,
	.driver = {
			.name = "mobiveil-pcie",
			.of_match_table = mobiveil_pcie_of_match,
			.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(mobiveil_pcie_driver);
863
/* Module metadata (informational; the driver is builtin-only). */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 6ab28f29ac6a..874d75c9ee4a 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -11,6 +11,7 @@
11 * Author: Phil Edworthy <phil.edworthy@renesas.com> 11 * Author: Phil Edworthy <phil.edworthy@renesas.com>
12 */ 12 */
13 13
14#include <linux/bitops.h>
14#include <linux/clk.h> 15#include <linux/clk.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
@@ -24,18 +25,23 @@
24#include <linux/of_pci.h> 25#include <linux/of_pci.h>
25#include <linux/of_platform.h> 26#include <linux/of_platform.h>
26#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/phy/phy.h>
27#include <linux/platform_device.h> 29#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
29#include <linux/slab.h> 31#include <linux/slab.h>
30 32
33#include "../pci.h"
34
31#define PCIECAR 0x000010 35#define PCIECAR 0x000010
32#define PCIECCTLR 0x000018 36#define PCIECCTLR 0x000018
33#define CONFIG_SEND_ENABLE (1 << 31) 37#define CONFIG_SEND_ENABLE BIT(31)
34#define TYPE0 (0 << 8) 38#define TYPE0 (0 << 8)
35#define TYPE1 (1 << 8) 39#define TYPE1 BIT(8)
36#define PCIECDR 0x000020 40#define PCIECDR 0x000020
37#define PCIEMSR 0x000028 41#define PCIEMSR 0x000028
38#define PCIEINTXR 0x000400 42#define PCIEINTXR 0x000400
43#define PCIEPHYSR 0x0007f0
44#define PHYRDY BIT(0)
39#define PCIEMSITXR 0x000840 45#define PCIEMSITXR 0x000840
40 46
41/* Transfer control */ 47/* Transfer control */
@@ -44,7 +50,7 @@
44#define PCIETSTR 0x02004 50#define PCIETSTR 0x02004
45#define DATA_LINK_ACTIVE 1 51#define DATA_LINK_ACTIVE 1
46#define PCIEERRFR 0x02020 52#define PCIEERRFR 0x02020
47#define UNSUPPORTED_REQUEST (1 << 4) 53#define UNSUPPORTED_REQUEST BIT(4)
48#define PCIEMSIFR 0x02044 54#define PCIEMSIFR 0x02044
49#define PCIEMSIALR 0x02048 55#define PCIEMSIALR 0x02048
50#define MSIFE 1 56#define MSIFE 1
@@ -57,17 +63,17 @@
57/* local address reg & mask */ 63/* local address reg & mask */
58#define PCIELAR(x) (0x02200 + ((x) * 0x20)) 64#define PCIELAR(x) (0x02200 + ((x) * 0x20))
59#define PCIELAMR(x) (0x02208 + ((x) * 0x20)) 65#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
60#define LAM_PREFETCH (1 << 3) 66#define LAM_PREFETCH BIT(3)
61#define LAM_64BIT (1 << 2) 67#define LAM_64BIT BIT(2)
62#define LAR_ENABLE (1 << 1) 68#define LAR_ENABLE BIT(1)
63 69
64/* PCIe address reg & mask */ 70/* PCIe address reg & mask */
65#define PCIEPALR(x) (0x03400 + ((x) * 0x20)) 71#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
66#define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) 72#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
67#define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) 73#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
68#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) 74#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
69#define PAR_ENABLE (1 << 31) 75#define PAR_ENABLE BIT(31)
70#define IO_SPACE (1 << 8) 76#define IO_SPACE BIT(8)
71 77
72/* Configuration */ 78/* Configuration */
73#define PCICONF(x) (0x010000 + ((x) * 0x4)) 79#define PCICONF(x) (0x010000 + ((x) * 0x4))
@@ -79,47 +85,46 @@
79#define IDSETR1 0x011004 85#define IDSETR1 0x011004
80#define TLCTLR 0x011048 86#define TLCTLR 0x011048
81#define MACSR 0x011054 87#define MACSR 0x011054
82#define SPCHGFIN (1 << 4) 88#define SPCHGFIN BIT(4)
83#define SPCHGFAIL (1 << 6) 89#define SPCHGFAIL BIT(6)
84#define SPCHGSUC (1 << 7) 90#define SPCHGSUC BIT(7)
85#define LINK_SPEED (0xf << 16) 91#define LINK_SPEED (0xf << 16)
86#define LINK_SPEED_2_5GTS (1 << 16) 92#define LINK_SPEED_2_5GTS (1 << 16)
87#define LINK_SPEED_5_0GTS (2 << 16) 93#define LINK_SPEED_5_0GTS (2 << 16)
88#define MACCTLR 0x011058 94#define MACCTLR 0x011058
89#define SPEED_CHANGE (1 << 24) 95#define SPEED_CHANGE BIT(24)
90#define SCRAMBLE_DISABLE (1 << 27) 96#define SCRAMBLE_DISABLE BIT(27)
91#define MACS2R 0x011078 97#define MACS2R 0x011078
92#define MACCGSPSETR 0x011084 98#define MACCGSPSETR 0x011084
93#define SPCNGRSN (1 << 31) 99#define SPCNGRSN BIT(31)
94 100
95/* R-Car H1 PHY */ 101/* R-Car H1 PHY */
96#define H1_PCIEPHYADRR 0x04000c 102#define H1_PCIEPHYADRR 0x04000c
97#define WRITE_CMD (1 << 16) 103#define WRITE_CMD BIT(16)
98#define PHY_ACK (1 << 24) 104#define PHY_ACK BIT(24)
99#define RATE_POS 12 105#define RATE_POS 12
100#define LANE_POS 8 106#define LANE_POS 8
101#define ADR_POS 0 107#define ADR_POS 0
102#define H1_PCIEPHYDOUTR 0x040014 108#define H1_PCIEPHYDOUTR 0x040014
103#define H1_PCIEPHYSR 0x040018
104 109
105/* R-Car Gen2 PHY */ 110/* R-Car Gen2 PHY */
106#define GEN2_PCIEPHYADDR 0x780 111#define GEN2_PCIEPHYADDR 0x780
107#define GEN2_PCIEPHYDATA 0x784 112#define GEN2_PCIEPHYDATA 0x784
108#define GEN2_PCIEPHYCTRL 0x78c 113#define GEN2_PCIEPHYCTRL 0x78c
109 114
110#define INT_PCI_MSI_NR 32 115#define INT_PCI_MSI_NR 32
111 116
112#define RCONF(x) (PCICONF(0)+(x)) 117#define RCONF(x) (PCICONF(0) + (x))
113#define RPMCAP(x) (PMCAP(0)+(x)) 118#define RPMCAP(x) (PMCAP(0) + (x))
114#define REXPCAP(x) (EXPCAP(0)+(x)) 119#define REXPCAP(x) (EXPCAP(0) + (x))
115#define RVCCAP(x) (VCCAP(0)+(x)) 120#define RVCCAP(x) (VCCAP(0) + (x))
116 121
117#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) 122#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
118#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) 123#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
119#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) 124#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
120 125
121#define RCAR_PCI_MAX_RESOURCES 4 126#define RCAR_PCI_MAX_RESOURCES 4
122#define MAX_NR_INBOUND_MAPS 6 127#define MAX_NR_INBOUND_MAPS 6
123 128
124struct rcar_msi { 129struct rcar_msi {
125 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 130 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -139,10 +144,10 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
139/* Structure representing the PCIe interface */ 144/* Structure representing the PCIe interface */
140struct rcar_pcie { 145struct rcar_pcie {
141 struct device *dev; 146 struct device *dev;
147 struct phy *phy;
142 void __iomem *base; 148 void __iomem *base;
143 struct list_head resources; 149 struct list_head resources;
144 int root_bus_nr; 150 int root_bus_nr;
145 struct clk *clk;
146 struct clk *bus_clk; 151 struct clk *bus_clk;
147 struct rcar_msi msi; 152 struct rcar_msi msi;
148}; 153};
@@ -527,12 +532,12 @@ static void phy_write_reg(struct rcar_pcie *pcie,
527 phy_wait_for_ack(pcie); 532 phy_wait_for_ack(pcie);
528} 533}
529 534
530static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) 535static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
531{ 536{
532 unsigned int timeout = 10; 537 unsigned int timeout = 10;
533 538
534 while (timeout--) { 539 while (timeout--) {
535 if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) 540 if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
536 return 0; 541 return 0;
537 542
538 msleep(5); 543 msleep(5);
@@ -541,6 +546,21 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
541 return -ETIMEDOUT; 546 return -ETIMEDOUT;
542} 547}
543 548
549static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
550{
551 unsigned int timeout = 10000;
552
553 while (timeout--) {
554 if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
555 return 0;
556
557 udelay(5);
558 cpu_relax();
559 }
560
561 return -ETIMEDOUT;
562}
563
544static int rcar_pcie_hw_init(struct rcar_pcie *pcie) 564static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
545{ 565{
546 int err; 566 int err;
@@ -551,6 +571,10 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
551 /* Set mode */ 571 /* Set mode */
552 rcar_pci_write_reg(pcie, 1, PCIEMSR); 572 rcar_pci_write_reg(pcie, 1, PCIEMSR);
553 573
574 err = rcar_pcie_wait_for_phyrdy(pcie);
575 if (err)
576 return err;
577
554 /* 578 /*
555 * Initial header for port config space is type 1, set the device 579 * Initial header for port config space is type 1, set the device
556 * class to match. Hardware takes care of propagating the IDSETR 580 * class to match. Hardware takes care of propagating the IDSETR
@@ -605,10 +629,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
605 return 0; 629 return 0;
606} 630}
607 631
608static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie) 632static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
609{ 633{
610 unsigned int timeout = 10;
611
612 /* Initialize the phy */ 634 /* Initialize the phy */
613 phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); 635 phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
614 phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); 636 phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
@@ -627,17 +649,10 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
627 phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); 649 phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
628 phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); 650 phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
629 651
630 while (timeout--) { 652 return 0;
631 if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
632 return rcar_pcie_hw_init(pcie);
633
634 msleep(5);
635 }
636
637 return -ETIMEDOUT;
638} 653}
639 654
640static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie) 655static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
641{ 656{
642 /* 657 /*
643 * These settings come from the R-Car Series, 2nd Generation User's 658 * These settings come from the R-Car Series, 2nd Generation User's
@@ -654,7 +669,18 @@ static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
654 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); 669 rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
655 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); 670 rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
656 671
657 return rcar_pcie_hw_init(pcie); 672 return 0;
673}
674
675static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
676{
677 int err;
678
679 err = phy_init(pcie->phy);
680 if (err)
681 return err;
682
683 return phy_power_on(pcie->phy);
658} 684}
659 685
660static int rcar_msi_alloc(struct rcar_msi *chip) 686static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -842,6 +868,20 @@ static const struct irq_domain_ops msi_domain_ops = {
842 .map = rcar_msi_map, 868 .map = rcar_msi_map,
843}; 869};
844 870
871static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
872{
873 struct rcar_msi *msi = &pcie->msi;
874 int i, irq;
875
876 for (i = 0; i < INT_PCI_MSI_NR; i++) {
877 irq = irq_find_mapping(msi->domain, i);
878 if (irq > 0)
879 irq_dispose_mapping(irq);
880 }
881
882 irq_domain_remove(msi->domain);
883}
884
845static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) 885static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
846{ 886{
847 struct device *dev = pcie->dev; 887 struct device *dev = pcie->dev;
@@ -896,16 +936,35 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
896 return 0; 936 return 0;
897 937
898err: 938err:
899 irq_domain_remove(msi->domain); 939 rcar_pcie_unmap_msi(pcie);
900 return err; 940 return err;
901} 941}
902 942
943static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
944{
945 struct rcar_msi *msi = &pcie->msi;
946
947 /* Disable all MSI interrupts */
948 rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
949
950 /* Disable address decoding of the MSI interrupt, MSIFE */
951 rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
952
953 free_pages(msi->pages, 0);
954
955 rcar_pcie_unmap_msi(pcie);
956}
957
903static int rcar_pcie_get_resources(struct rcar_pcie *pcie) 958static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
904{ 959{
905 struct device *dev = pcie->dev; 960 struct device *dev = pcie->dev;
906 struct resource res; 961 struct resource res;
907 int err, i; 962 int err, i;
908 963
964 pcie->phy = devm_phy_optional_get(dev, "pcie");
965 if (IS_ERR(pcie->phy))
966 return PTR_ERR(pcie->phy);
967
909 err = of_address_to_resource(dev->of_node, 0, &res); 968 err = of_address_to_resource(dev->of_node, 0, &res);
910 if (err) 969 if (err)
911 return err; 970 return err;
@@ -914,30 +973,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
914 if (IS_ERR(pcie->base)) 973 if (IS_ERR(pcie->base))
915 return PTR_ERR(pcie->base); 974 return PTR_ERR(pcie->base);
916 975
917 pcie->clk = devm_clk_get(dev, "pcie");
918 if (IS_ERR(pcie->clk)) {
919 dev_err(dev, "cannot get platform clock\n");
920 return PTR_ERR(pcie->clk);
921 }
922 err = clk_prepare_enable(pcie->clk);
923 if (err)
924 return err;
925
926 pcie->bus_clk = devm_clk_get(dev, "pcie_bus"); 976 pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
927 if (IS_ERR(pcie->bus_clk)) { 977 if (IS_ERR(pcie->bus_clk)) {
928 dev_err(dev, "cannot get pcie bus clock\n"); 978 dev_err(dev, "cannot get pcie bus clock\n");
929 err = PTR_ERR(pcie->bus_clk); 979 return PTR_ERR(pcie->bus_clk);
930 goto fail_clk;
931 } 980 }
932 err = clk_prepare_enable(pcie->bus_clk);
933 if (err)
934 goto fail_clk;
935 981
936 i = irq_of_parse_and_map(dev->of_node, 0); 982 i = irq_of_parse_and_map(dev->of_node, 0);
937 if (!i) { 983 if (!i) {
938 dev_err(dev, "cannot get platform resources for msi interrupt\n"); 984 dev_err(dev, "cannot get platform resources for msi interrupt\n");
939 err = -ENOENT; 985 err = -ENOENT;
940 goto err_map_reg; 986 goto err_irq1;
941 } 987 }
942 pcie->msi.irq1 = i; 988 pcie->msi.irq1 = i;
943 989
@@ -945,17 +991,15 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
945 if (!i) { 991 if (!i) {
946 dev_err(dev, "cannot get platform resources for msi interrupt\n"); 992 dev_err(dev, "cannot get platform resources for msi interrupt\n");
947 err = -ENOENT; 993 err = -ENOENT;
948 goto err_map_reg; 994 goto err_irq2;
949 } 995 }
950 pcie->msi.irq2 = i; 996 pcie->msi.irq2 = i;
951 997
952 return 0; 998 return 0;
953 999
954err_map_reg: 1000err_irq2:
955 clk_disable_unprepare(pcie->bus_clk); 1001 irq_dispose_mapping(pcie->msi.irq1);
956fail_clk: 1002err_irq1:
957 clk_disable_unprepare(pcie->clk);
958
959 return err; 1003 return err;
960} 1004}
961 1005
@@ -1051,63 +1095,28 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
1051} 1095}
1052 1096
1053static const struct of_device_id rcar_pcie_of_match[] = { 1097static const struct of_device_id rcar_pcie_of_match[] = {
1054 { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, 1098 { .compatible = "renesas,pcie-r8a7779",
1099 .data = rcar_pcie_phy_init_h1 },
1055 { .compatible = "renesas,pcie-r8a7790", 1100 { .compatible = "renesas,pcie-r8a7790",
1056 .data = rcar_pcie_hw_init_gen2 }, 1101 .data = rcar_pcie_phy_init_gen2 },
1057 { .compatible = "renesas,pcie-r8a7791", 1102 { .compatible = "renesas,pcie-r8a7791",
1058 .data = rcar_pcie_hw_init_gen2 }, 1103 .data = rcar_pcie_phy_init_gen2 },
1059 { .compatible = "renesas,pcie-rcar-gen2", 1104 { .compatible = "renesas,pcie-rcar-gen2",
1060 .data = rcar_pcie_hw_init_gen2 }, 1105 .data = rcar_pcie_phy_init_gen2 },
1061 { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init }, 1106 { .compatible = "renesas,pcie-r8a7795",
1062 { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init }, 1107 .data = rcar_pcie_phy_init_gen3 },
1108 { .compatible = "renesas,pcie-rcar-gen3",
1109 .data = rcar_pcie_phy_init_gen3 },
1063 {}, 1110 {},
1064}; 1111};
1065 1112
1066static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
1067{
1068 int err;
1069 struct device *dev = pci->dev;
1070 struct device_node *np = dev->of_node;
1071 resource_size_t iobase;
1072 struct resource_entry *win, *tmp;
1073
1074 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
1075 &iobase);
1076 if (err)
1077 return err;
1078
1079 err = devm_request_pci_bus_resources(dev, &pci->resources);
1080 if (err)
1081 goto out_release_res;
1082
1083 resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
1084 struct resource *res = win->res;
1085
1086 if (resource_type(res) == IORESOURCE_IO) {
1087 err = pci_remap_iospace(res, iobase);
1088 if (err) {
1089 dev_warn(dev, "error %d: failed to map resource %pR\n",
1090 err, res);
1091
1092 resource_list_destroy_entry(win);
1093 }
1094 }
1095 }
1096
1097 return 0;
1098
1099out_release_res:
1100 pci_free_resource_list(&pci->resources);
1101 return err;
1102}
1103
1104static int rcar_pcie_probe(struct platform_device *pdev) 1113static int rcar_pcie_probe(struct platform_device *pdev)
1105{ 1114{
1106 struct device *dev = &pdev->dev; 1115 struct device *dev = &pdev->dev;
1107 struct rcar_pcie *pcie; 1116 struct rcar_pcie *pcie;
1108 unsigned int data; 1117 unsigned int data;
1109 int err; 1118 int err;
1110 int (*hw_init_fn)(struct rcar_pcie *); 1119 int (*phy_init_fn)(struct rcar_pcie *);
1111 struct pci_host_bridge *bridge; 1120 struct pci_host_bridge *bridge;
1112 1121
1113 bridge = pci_alloc_host_bridge(sizeof(*pcie)); 1122 bridge = pci_alloc_host_bridge(sizeof(*pcie));
@@ -1118,36 +1127,45 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1118 1127
1119 pcie->dev = dev; 1128 pcie->dev = dev;
1120 1129
1121 INIT_LIST_HEAD(&pcie->resources); 1130 err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
1122
1123 err = rcar_pcie_parse_request_of_pci_ranges(pcie);
1124 if (err) 1131 if (err)
1125 goto err_free_bridge; 1132 goto err_free_bridge;
1126 1133
1134 pm_runtime_enable(pcie->dev);
1135 err = pm_runtime_get_sync(pcie->dev);
1136 if (err < 0) {
1137 dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
1138 goto err_pm_disable;
1139 }
1140
1127 err = rcar_pcie_get_resources(pcie); 1141 err = rcar_pcie_get_resources(pcie);
1128 if (err < 0) { 1142 if (err < 0) {
1129 dev_err(dev, "failed to request resources: %d\n", err); 1143 dev_err(dev, "failed to request resources: %d\n", err);
1130 goto err_free_resource_list; 1144 goto err_pm_put;
1145 }
1146
1147 err = clk_prepare_enable(pcie->bus_clk);
1148 if (err) {
1149 dev_err(dev, "failed to enable bus clock: %d\n", err);
1150 goto err_unmap_msi_irqs;
1131 } 1151 }
1132 1152
1133 err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); 1153 err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
1134 if (err) 1154 if (err)
1135 goto err_free_resource_list; 1155 goto err_clk_disable;
1136 1156
1137 pm_runtime_enable(dev); 1157 phy_init_fn = of_device_get_match_data(dev);
1138 err = pm_runtime_get_sync(dev); 1158 err = phy_init_fn(pcie);
1139 if (err < 0) { 1159 if (err) {
1140 dev_err(dev, "pm_runtime_get_sync failed\n"); 1160 dev_err(dev, "failed to init PCIe PHY\n");
1141 goto err_pm_disable; 1161 goto err_clk_disable;
1142 } 1162 }
1143 1163
1144 /* Failure to get a link might just be that no cards are inserted */ 1164 /* Failure to get a link might just be that no cards are inserted */
1145 hw_init_fn = of_device_get_match_data(dev); 1165 if (rcar_pcie_hw_init(pcie)) {
1146 err = hw_init_fn(pcie);
1147 if (err) {
1148 dev_info(dev, "PCIe link down\n"); 1166 dev_info(dev, "PCIe link down\n");
1149 err = -ENODEV; 1167 err = -ENODEV;
1150 goto err_pm_put; 1168 goto err_clk_disable;
1151 } 1169 }
1152 1170
1153 data = rcar_pci_read_reg(pcie, MACSR); 1171 data = rcar_pci_read_reg(pcie, MACSR);
@@ -1159,24 +1177,34 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1159 dev_err(dev, 1177 dev_err(dev,
1160 "failed to enable MSI support: %d\n", 1178 "failed to enable MSI support: %d\n",
1161 err); 1179 err);
1162 goto err_pm_put; 1180 goto err_clk_disable;
1163 } 1181 }
1164 } 1182 }
1165 1183
1166 err = rcar_pcie_enable(pcie); 1184 err = rcar_pcie_enable(pcie);
1167 if (err) 1185 if (err)
1168 goto err_pm_put; 1186 goto err_msi_teardown;
1169 1187
1170 return 0; 1188 return 0;
1171 1189
1190err_msi_teardown:
1191 if (IS_ENABLED(CONFIG_PCI_MSI))
1192 rcar_pcie_teardown_msi(pcie);
1193
1194err_clk_disable:
1195 clk_disable_unprepare(pcie->bus_clk);
1196
1197err_unmap_msi_irqs:
1198 irq_dispose_mapping(pcie->msi.irq2);
1199 irq_dispose_mapping(pcie->msi.irq1);
1200
1172err_pm_put: 1201err_pm_put:
1173 pm_runtime_put(dev); 1202 pm_runtime_put(dev);
1174 1203
1175err_pm_disable: 1204err_pm_disable:
1176 pm_runtime_disable(dev); 1205 pm_runtime_disable(dev);
1177
1178err_free_resource_list:
1179 pci_free_resource_list(&pcie->resources); 1206 pci_free_resource_list(&pcie->resources);
1207
1180err_free_bridge: 1208err_free_bridge:
1181 pci_free_host_bridge(bridge); 1209 pci_free_host_bridge(bridge);
1182 1210
diff --git a/drivers/pci/host/pcie-rockchip-ep.c b/drivers/pci/host/pcie-rockchip-ep.c
new file mode 100644
index 000000000000..fc267a49a932
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-ep.c
@@ -0,0 +1,642 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Rockchip AXI PCIe endpoint controller driver
4 *
5 * Copyright (c) 2018 Rockchip, Inc.
6 *
7 * Author: Shawn Lin <shawn.lin@rock-chips.com>
8 * Simon Xue <xxm@rock-chips.com>
9 */
10
11#include <linux/configfs.h>
12#include <linux/delay.h>
13#include <linux/kernel.h>
14#include <linux/of.h>
15#include <linux/pci-epc.h>
16#include <linux/platform_device.h>
17#include <linux/pci-epf.h>
18#include <linux/sizes.h>
19
20#include "pcie-rockchip.h"
21
/**
 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 * @rockchip: Rockchip PCIe controller
 * @epc: PCI endpoint controller registered with the EPC core for this device
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *                 dedicated outbound regions is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *                the sending of a memory write (MSI) / normal message (legacy
 *                IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *                dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *              the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 */
struct rockchip_pcie_ep {
	struct rockchip_pcie rockchip;
	struct pci_epc *epc;
	u32 max_regions;
	unsigned long ob_region_map;
	phys_addr_t *ob_addr;
	phys_addr_t irq_phys_addr;
	void __iomem *irq_cpu_addr;
	u64 irq_pci_addr;
	u8 irq_pci_fn;
	u8 irq_pending;
};
51
52static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
53 u32 region)
54{
55 rockchip_pcie_write(rockchip, 0,
56 ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
57 rockchip_pcie_write(rockchip, 0,
58 ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
59 rockchip_pcie_write(rockchip, 0,
60 ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
61 rockchip_pcie_write(rockchip, 0,
62 ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
63 rockchip_pcie_write(rockchip, 0,
64 ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
65 rockchip_pcie_write(rockchip, 0,
66 ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
67}
68
/*
 * Program outbound ATU region @r of function @fn so CPU accesses at
 * @cpu_addr are translated to @pci_addr on the link.  For
 * AXI_WRAPPER_NOR_MSG the PCI-side registers are zeroed: the region only
 * emits message TLPs, not memory accesses.
 */
static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
					 u32 r, u32 type, u64 cpu_addr,
					 u64 pci_addr, size_t size)
{
	/* Round the window up to the next power of two of @size. */
	u64 sz = 1ULL << fls64(size - 1);
	int num_pass_bits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;
	bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);

	/*
	 * The minimal region size is 1MB
	 * NOTE(review): 2^8 is 256 bytes, not 1MB -- the clamp value and
	 * this claim should be verified against the TRM.
	 */
	if (num_pass_bits < 8)
		num_pass_bits = 8;

	/* Region registers take offsets relative to the PCIe memory window. */
	cpu_addr -= rockchip->mem_res->start;
	addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
		PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
		(lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
	addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
	desc1 = 0;

	if (is_nor_msg) {
		rockchip_pcie_write(rockchip, 0,
				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
		rockchip_pcie_write(rockchip, 0,
				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
		rockchip_pcie_write(rockchip, desc0,
				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
		rockchip_pcie_write(rockchip, desc1,
				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
	} else {
		/* PCI bus address region */
		rockchip_pcie_write(rockchip, addr0,
				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
		rockchip_pcie_write(rockchip, addr1,
				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
		rockchip_pcie_write(rockchip, desc0,
				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
		rockchip_pcie_write(rockchip, desc1,
				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));

		/* addr0/addr1 are reused for the CPU-side programming below. */
		addr0 =
		    ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
		    (lower_32_bits(cpu_addr) &
		     PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
		addr1 = upper_32_bits(cpu_addr);
	}

	/* CPU bus address region */
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
}
123
124static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
125 struct pci_epf_header *hdr)
126{
127 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
128 struct rockchip_pcie *rockchip = &ep->rockchip;
129
130 /* All functions share the same vendor ID with function 0 */
131 if (fn == 0) {
132 u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
133 (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
134
135 rockchip_pcie_write(rockchip, vid_regs,
136 PCIE_CORE_CONFIG_VENDOR);
137 }
138
139 rockchip_pcie_write(rockchip, hdr->deviceid << 16,
140 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
141
142 rockchip_pcie_write(rockchip,
143 hdr->revid |
144 hdr->progif_code << 8 |
145 hdr->subclass_code << 16 |
146 hdr->baseclass_code << 24,
147 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
148 rockchip_pcie_write(rockchip, hdr->cache_line_size,
149 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
150 PCI_CACHE_LINE_SIZE);
151 rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
152 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
153 PCI_SUBSYSTEM_VENDOR_ID);
154 rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
155 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
156 PCI_INTERRUPT_LINE);
157
158 return 0;
159}
160
/*
 * Configure BAR @epf_bar->barno of function @fn: select a BAR control
 * type from the flags, encode the aperture as log2(size) - 7, and point
 * the inbound translation at @epf_bar->phys_addr.
 *
 * Returns 0 on success, -EINVAL for a 64-bit BAR on an odd BAR number.
 */
static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				    struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);

	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		/* A 64-bit BAR consumes a pair; it must start on an even BAR. */
		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl =
			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl =
			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
	}

	/* BARs 0-3 are configured via CFG0, BARs 4-5 via CFG1. */
	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	/* Read-modify-write only this BAR's aperture/ctrl fields. */
	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));

	return 0;
}
228
229static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
230 struct pci_epf_bar *epf_bar)
231{
232 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
233 struct rockchip_pcie *rockchip = &ep->rockchip;
234 u32 reg, cfg, b, ctrl;
235 enum pci_barno bar = epf_bar->barno;
236
237 if (bar < BAR_4) {
238 reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
239 b = bar;
240 } else {
241 reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
242 b = bar - BAR_4;
243 }
244
245 ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
246 cfg = rockchip_pcie_read(rockchip, reg);
247 cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
248 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
249 cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
250
251 rockchip_pcie_write(rockchip, cfg, reg);
252 rockchip_pcie_write(rockchip, 0x0,
253 ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
254 rockchip_pcie_write(rockchip, 0x0,
255 ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
256}
257
258static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
259 phys_addr_t addr, u64 pci_addr,
260 size_t size)
261{
262 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
263 struct rockchip_pcie *pcie = &ep->rockchip;
264 u32 r;
265
266 r = find_first_zero_bit(&ep->ob_region_map,
267 sizeof(ep->ob_region_map) * BITS_PER_LONG);
268 /*
269 * Region 0 is reserved for configuration space and shouldn't
270 * be used elsewhere per TRM, so leave it out.
271 */
272 if (r >= ep->max_regions - 1) {
273 dev_err(&epc->dev, "no free outbound region\n");
274 return -EINVAL;
275 }
276
277 rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
278 pci_addr, size);
279
280 set_bit(r, &ep->ob_region_map);
281 ep->ob_addr[r] = addr;
282
283 return 0;
284}
285
286static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
287 phys_addr_t addr)
288{
289 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
290 struct rockchip_pcie *rockchip = &ep->rockchip;
291 u32 r;
292
293 for (r = 0; r < ep->max_regions - 1; r++)
294 if (ep->ob_addr[r] == addr)
295 break;
296
297 /*
298 * Region 0 is reserved for configuration space and shouldn't
299 * be used elsewhere per TRM, so leave it out.
300 */
301 if (r == ep->max_regions - 1)
302 return;
303
304 rockchip_pcie_clear_ep_ob_atu(rockchip, r);
305
306 ep->ob_addr[r] = 0;
307 clear_bit(r, &ep->ob_region_map);
308}
309
/*
 * Configure function @fn's MSI capability: set the Multiple Message
 * Capable field from @multi_msg_cap, advertise 64-bit addressing, and
 * clear the per-vector masking capability bit.
 */
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
				    u8 multi_msg_cap)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u16 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
	/*
	 * NOTE(review): the extra << 1 ahead of the MMC offset shift is
	 * unusual -- confirm the register field layout against the TRM.
	 */
	flags |=
	   ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
	   PCI_MSI_FLAGS_64BIT;
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
	rockchip_pcie_write(rockchip, flags,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	return 0;
}
330
331static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
332{
333 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
334 struct rockchip_pcie *rockchip = &ep->rockchip;
335 u16 flags;
336
337 flags = rockchip_pcie_read(rockchip,
338 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
339 ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
340 if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
341 return -EINVAL;
342
343 return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
344 ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
345}
346
/*
 * Assert or deassert legacy interrupt @intx of function @fn by emitting
 * the corresponding Assert_INTx/Deassert_INTx message through the last
 * outbound region, mirroring the pending state into the function's
 * command/status Interrupt Status bit.
 */
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 intx, bool is_asserted)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 r = ep->max_regions - 1;
	u32 offset;
	u16 status;
	u8 msg_code;

	/* Re-program the IRQ outbound region only when its target changed. */
	if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
		     ep->irq_pci_fn != fn)) {
		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
					     AXI_WRAPPER_NOR_MSG,
					     ep->irq_phys_addr, 0, 0);
		ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
		ep->irq_pci_fn = fn;
	}

	/* Only INTA..INTD exist; track the per-line pending state. */
	intx &= 3;
	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
	}

	/* Toggle the status IS bit iff it disagrees with irq_pending. */
	status = rockchip_pcie_read(rockchip,
				    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				    ROCKCHIP_PCIE_EP_CMD_STATUS);
	status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;

	if ((status != 0) ^ (ep->irq_pending != 0)) {
		status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
		rockchip_pcie_write(rockchip, status,
				    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				    ROCKCHIP_PCIE_EP_CMD_STATUS);
	}

	/* The message routing/code is encoded in the address offset written. */
	offset =
	   ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
	   ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}
391
392static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
393 u8 intx)
394{
395 u16 cmd;
396
397 cmd = rockchip_pcie_read(&ep->rockchip,
398 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
399 ROCKCHIP_PCIE_EP_CMD_STATUS);
400
401 if (cmd & PCI_COMMAND_INTX_DISABLE)
402 return -EINVAL;
403
404 /*
405 * Should add some delay between toggling INTx per TRM vaguely saying
406 * it depends on some cycles of the AHB bus clock to function it. So
407 * add sufficient 1ms here.
408 */
409 rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
410 mdelay(1);
411 rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
412 return 0;
413}
414
/*
 * Raise MSI vector @interrupt_num (1-based) for function @fn by writing
 * the MSI data word to the host-programmed MSI address through the last
 * outbound region.  Fails with -EINVAL when MSI is disabled or the
 * vector exceeds the granted Multiple Message Enable count.
 */
static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 interrupt_num)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check MSI enable bit */
	flags = rockchip_pcie_read(&ep->rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	/* Get MSI numbers from MME */
	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
	       ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Set MSI private data: the low bits select the vector. */
	data_mask = msi_count - 1;
	data = rockchip_pcie_read(rockchip,
				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				  PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get MSI PCI address (64-bit, from the capability registers). */
	pci_addr = rockchip_pcie_read(rockchip,
				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				      PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= rockchip_pcie_read(rockchip,
				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				       PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
					     AXI_WRAPPER_MEM_WRITE,
					     ep->irq_phys_addr,
					     pci_addr & ~pci_addr_mask,
					     pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}

	/* Trigger the MSI memory write through the IRQ window. */
	writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
	return 0;
}
472
473static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
474 enum pci_epc_irq_type type,
475 u8 interrupt_num)
476{
477 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
478
479 switch (type) {
480 case PCI_EPC_IRQ_LEGACY:
481 return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
482 case PCI_EPC_IRQ_MSI:
483 return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
484 default:
485 return -EINVAL;
486 }
487}
488
489static int rockchip_pcie_ep_start(struct pci_epc *epc)
490{
491 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
492 struct rockchip_pcie *rockchip = &ep->rockchip;
493 struct pci_epf *epf;
494 u32 cfg;
495
496 cfg = BIT(0);
497 list_for_each_entry(epf, &epc->pci_epf, list)
498 cfg |= BIT(epf->func_no);
499
500 rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
501
502 list_for_each_entry(epf, &epc->pci_epf, list)
503 pci_epf_linkup(epf);
504
505 return 0;
506}
507
/* EPC operations handed to the generic PCI endpoint core. */
static const struct pci_epc_ops rockchip_pcie_epc_ops = {
	.write_header	= rockchip_pcie_ep_write_header,
	.set_bar	= rockchip_pcie_ep_set_bar,
	.clear_bar	= rockchip_pcie_ep_clear_bar,
	.map_addr	= rockchip_pcie_ep_map_addr,
	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
	.set_msi	= rockchip_pcie_ep_set_msi,
	.get_msi	= rockchip_pcie_ep_get_msi,
	.raise_irq	= rockchip_pcie_ep_raise_irq,
	.start		= rockchip_pcie_ep_start,
};
519
520static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
521 struct rockchip_pcie_ep *ep)
522{
523 struct device *dev = rockchip->dev;
524 int err;
525
526 err = rockchip_pcie_parse_dt(rockchip);
527 if (err)
528 return err;
529
530 err = rockchip_pcie_get_phys(rockchip);
531 if (err)
532 return err;
533
534 err = of_property_read_u32(dev->of_node,
535 "rockchip,max-outbound-regions",
536 &ep->max_regions);
537 if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
538 ep->max_regions = MAX_REGION_LIMIT;
539
540 err = of_property_read_u8(dev->of_node, "max-functions",
541 &ep->epc->max_functions);
542 if (err < 0)
543 ep->epc->max_functions = 1;
544
545 return 0;
546}
547
/* Device-tree compatibles matched by this endpoint driver. */
static const struct of_device_id rockchip_pcie_ep_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie-ep"},
	{},
};
552
553static int rockchip_pcie_ep_probe(struct platform_device *pdev)
554{
555 struct device *dev = &pdev->dev;
556 struct rockchip_pcie_ep *ep;
557 struct rockchip_pcie *rockchip;
558 struct pci_epc *epc;
559 size_t max_regions;
560 int err;
561
562 ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
563 if (!ep)
564 return -ENOMEM;
565
566 rockchip = &ep->rockchip;
567 rockchip->is_rc = false;
568 rockchip->dev = dev;
569
570 epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
571 if (IS_ERR(epc)) {
572 dev_err(dev, "failed to create epc device\n");
573 return PTR_ERR(epc);
574 }
575
576 ep->epc = epc;
577 epc_set_drvdata(epc, ep);
578
579 err = rockchip_pcie_parse_ep_dt(rockchip, ep);
580 if (err)
581 return err;
582
583 err = rockchip_pcie_enable_clocks(rockchip);
584 if (err)
585 return err;
586
587 err = rockchip_pcie_init_port(rockchip);
588 if (err)
589 goto err_disable_clocks;
590
591 /* Establish the link automatically */
592 rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
593 PCIE_CLIENT_CONFIG);
594
595 max_regions = ep->max_regions;
596 ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
597 GFP_KERNEL);
598
599 if (!ep->ob_addr) {
600 err = -ENOMEM;
601 goto err_uninit_port;
602 }
603
604 /* Only enable function 0 by default */
605 rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
606
607 err = pci_epc_mem_init(epc, rockchip->mem_res->start,
608 resource_size(rockchip->mem_res));
609 if (err < 0) {
610 dev_err(dev, "failed to initialize the memory space\n");
611 goto err_uninit_port;
612 }
613
614 ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
615 SZ_128K);
616 if (!ep->irq_cpu_addr) {
617 dev_err(dev, "failed to reserve memory space for MSI\n");
618 err = -ENOMEM;
619 goto err_epc_mem_exit;
620 }
621
622 ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
623
624 return 0;
625err_epc_mem_exit:
626 pci_epc_mem_exit(epc);
627err_uninit_port:
628 rockchip_pcie_deinit_phys(rockchip);
629err_disable_clocks:
630 rockchip_pcie_disable_clocks(rockchip);
631 return err;
632}
633
/* Built-in only: the endpoint controller cannot be a module. */
static struct platform_driver rockchip_pcie_ep_driver = {
	.driver = {
		.name = "rockchip-pcie-ep",
		.of_match_table = rockchip_pcie_ep_of_match,
	},
	.probe = rockchip_pcie_ep_probe,
};

builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c
new file mode 100644
index 000000000000..1372d270764f
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-host.c
@@ -0,0 +1,1142 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Rockchip AXI PCIe host controller driver
4 *
5 * Copyright (c) 2016 Rockchip, Inc.
6 *
7 * Author: Shawn Lin <shawn.lin@rock-chips.com>
8 * Wenrui Li <wenrui.li@rock-chips.com>
9 *
10 * Bits taken from Synopsys DesignWare Host controller driver and
11 * ARM PCI Host generic driver.
12 */
13
14#include <linux/bitrev.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17#include <linux/gpio/consumer.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/iopoll.h>
21#include <linux/irq.h>
22#include <linux/irqchip/chained_irq.h>
23#include <linux/irqdomain.h>
24#include <linux/kernel.h>
25#include <linux/mfd/syscon.h>
26#include <linux/module.h>
27#include <linux/of_address.h>
28#include <linux/of_device.h>
29#include <linux/of_pci.h>
30#include <linux/of_platform.h>
31#include <linux/of_irq.h>
32#include <linux/pci.h>
33#include <linux/pci_ids.h>
34#include <linux/phy/phy.h>
35#include <linux/platform_device.h>
36#include <linux/reset.h>
37#include <linux/regmap.h>
38
39#include "../pci.h"
40#include "pcie-rockchip.h"
41
42static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
43{
44 u32 status;
45
46 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
47 status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
48 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
49}
50
51static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
52{
53 u32 status;
54
55 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
56 status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
57 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
58}
59
60static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
61{
62 u32 val;
63
64 /* Update Tx credit maximum update interval */
65 val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
66 val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
67 val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
68 rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
69}
70
71static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
72 struct pci_bus *bus, int dev)
73{
74 /* access only one slot on each root port */
75 if (bus->number == rockchip->root_bus_nr && dev > 0)
76 return 0;
77
78 /*
79 * do not read more than one device on the bus directly attached
80 * to RC's downstream side.
81 */
82 if (bus->primary == rockchip->root_bus_nr && dev > 0)
83 return 0;
84
85 return 1;
86}
87
88static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
89{
90 u32 val;
91 u8 map;
92
93 if (rockchip->legacy_phy)
94 return GENMASK(MAX_LANE_NUM - 1, 0);
95
96 val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
97 map = val & PCIE_CORE_LANE_MAP_MASK;
98
99 /* The link may be using a reverse-indexed mapping. */
100 if (val & PCIE_CORE_LANE_MAP_REVERSE)
101 map = bitrev8(map) >> 4;
102
103 return map;
104}
105
106static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
107 int where, int size, u32 *val)
108{
109 void __iomem *addr;
110
111 addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
112
113 if (!IS_ALIGNED((uintptr_t)addr, size)) {
114 *val = 0;
115 return PCIBIOS_BAD_REGISTER_NUMBER;
116 }
117
118 if (size == 4) {
119 *val = readl(addr);
120 } else if (size == 2) {
121 *val = readw(addr);
122 } else if (size == 1) {
123 *val = readb(addr);
124 } else {
125 *val = 0;
126 return PCIBIOS_BAD_REGISTER_NUMBER;
127 }
128 return PCIBIOS_SUCCESSFUL;
129}
130
/*
 * Write @size bytes at @where into the root port's own config space.
 * Sub-word writes are emulated with a 32-bit read-modify-write because
 * the hardware only supports dword accesses.
 */
static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
				     int where, int size, u32 val)
{
	u32 mask, tmp, offset;
	void __iomem *addr;

	/* Align to the containing dword. */
	offset = where & ~0x3;
	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));

	/*
	 * N.B. This read/modify/write isn't safe in general because it can
	 * corrupt RW1C bits in adjacent registers.  But the hardware
	 * doesn't support smaller writes.
	 */
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
158
159static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
160 struct pci_bus *bus, u32 devfn,
161 int where, int size, u32 *val)
162{
163 u32 busdev;
164
165 busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
166 PCI_FUNC(devfn), where);
167
168 if (!IS_ALIGNED(busdev, size)) {
169 *val = 0;
170 return PCIBIOS_BAD_REGISTER_NUMBER;
171 }
172
173 if (bus->parent->number == rockchip->root_bus_nr)
174 rockchip_pcie_cfg_configuration_accesses(rockchip,
175 AXI_WRAPPER_TYPE0_CFG);
176 else
177 rockchip_pcie_cfg_configuration_accesses(rockchip,
178 AXI_WRAPPER_TYPE1_CFG);
179
180 if (size == 4) {
181 *val = readl(rockchip->reg_base + busdev);
182 } else if (size == 2) {
183 *val = readw(rockchip->reg_base + busdev);
184 } else if (size == 1) {
185 *val = readb(rockchip->reg_base + busdev);
186 } else {
187 *val = 0;
188 return PCIBIOS_BAD_REGISTER_NUMBER;
189 }
190 return PCIBIOS_SUCCESSFUL;
191}
192
193static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
194 struct pci_bus *bus, u32 devfn,
195 int where, int size, u32 val)
196{
197 u32 busdev;
198
199 busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
200 PCI_FUNC(devfn), where);
201 if (!IS_ALIGNED(busdev, size))
202 return PCIBIOS_BAD_REGISTER_NUMBER;
203
204 if (bus->parent->number == rockchip->root_bus_nr)
205 rockchip_pcie_cfg_configuration_accesses(rockchip,
206 AXI_WRAPPER_TYPE0_CFG);
207 else
208 rockchip_pcie_cfg_configuration_accesses(rockchip,
209 AXI_WRAPPER_TYPE1_CFG);
210
211 if (size == 4)
212 writel(val, rockchip->reg_base + busdev);
213 else if (size == 2)
214 writew(val, rockchip->reg_base + busdev);
215 else if (size == 1)
216 writeb(val, rockchip->reg_base + busdev);
217 else
218 return PCIBIOS_BAD_REGISTER_NUMBER;
219
220 return PCIBIOS_SUCCESSFUL;
221}
222
223static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
224 int size, u32 *val)
225{
226 struct rockchip_pcie *rockchip = bus->sysdata;
227
228 if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
229 *val = 0xffffffff;
230 return PCIBIOS_DEVICE_NOT_FOUND;
231 }
232
233 if (bus->number == rockchip->root_bus_nr)
234 return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
235
236 return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
237 val);
238}
239
240static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
241 int where, int size, u32 val)
242{
243 struct rockchip_pcie *rockchip = bus->sysdata;
244
245 if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
246 return PCIBIOS_DEVICE_NOT_FOUND;
247
248 if (bus->number == rockchip->root_bus_nr)
249 return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
250
251 return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
252 val);
253}
254
/* Config-space accessors wired into the PCI core. */
static struct pci_ops rockchip_pcie_ops = {
	.read = rockchip_pcie_rd_conf,
	.write = rockchip_pcie_wr_conf,
};
259
/*
 * Program the root port's captured slot power limit/scale from the
 * vpcie3v3 regulator's current limit.  Silently does nothing when no
 * 3.3V supply or no usable current limit is available.
 */
static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
{
	int curr;
	u32 status, scale, power;

	if (IS_ERR(rockchip->vpcie3v3))
		return;

	/*
	 * Set RC's captured slot power limit and scale if
	 * vpcie3v3 available. The default values are both zero
	 * which means the software should set these two according
	 * to the actual power supply.
	 */
	curr = regulator_get_current_limit(rockchip->vpcie3v3);
	if (curr <= 0)
		return;

	scale = 3; /* 0.001x */
	curr = curr / 1000; /* convert to mA */
	power = (curr * 3300) / 1000; /* milliwatt */
	/* Coarsen the scale until the value fits the CSPL field. */
	while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
		if (!scale) {
			dev_warn(rockchip->dev, "invalid power supply\n");
			return;
		}
		scale--;
		power = power / 10;
	}

	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
	status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
		  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
}
295
/**
 * rockchip_pcie_host_init_port - Initialize hardware
 * @rockchip: PCIe port information
 *
 * Brings the port out of reset, trains the link (retraining to Gen2
 * when requested), powers off unused lanes and programs the root-port
 * config space.
 *
 * Return: 0 on success, negative errno on reset or Gen1 training
 * failure (Gen2 training failure is non-fatal).
 */
static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err, i = MAX_LANE_NUM;
	u32 status;

	/* Hold the endpoint in reset (presumably PERST#) during port init. */
	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		return err;

	/* Fix the transmitted FTS count desired to exit from L0s. */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);

	rockchip_pcie_set_power_limit(rockchip);

	/* Set RC's clock architecture as common clock */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKSTA_SLC << 16;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Set RC's RCB to 128 */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKCTL_RCB;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Enable Gen1 training */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
			    PCIE_CLIENT_CONFIG);

	/* Release the endpoint from reset so it can train. */
	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);

	/* 500ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
				 status, PCIE_LINK_UP(status), 20,
				 500 * USEC_PER_MSEC);
	if (err) {
		dev_err(dev, "PCIe link training gen1 timeout!\n");
		goto err_power_off_phy;
	}

	if (rockchip->link_gen == 2) {
		/*
		 * Enable retrain for gen2. This should be configured only after
		 * gen1 finished.
		 */
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
		status |= PCI_EXP_LNKCTL_RL;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

		/* Gen2 timeout is non-fatal; the link stays at Gen1. */
		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
					 status, PCIE_LINK_IS_GEN2(status), 20,
					 500 * USEC_PER_MSEC);
		if (err)
			dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
	}

	/* Check the final link width from negotiated lane counter from MGMT */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
			 PCIE_CORE_PL_CONF_LANE_SHIFT);
	dev_dbg(dev, "current link width is x%d\n", status);

	/* Power off unused lane(s) */
	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
	for (i = 0; i < MAX_LANE_NUM; i++) {
		if (!(rockchip->lanes_map & BIT(i))) {
			dev_dbg(dev, "idling lane %d\n", i);
			phy_power_off(rockchip->phys[i]);
		}
	}

	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
			    PCIE_CORE_CONFIG_VENDOR);
	rockchip_pcie_write(rockchip,
			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
			    PCIE_RC_CONFIG_RID_CCR);

	/* Clear THP cap's next cap pointer to remove L1 substate cap */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);

	/* Clear L0s from RC's link cap */
	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
	}

	/* Set Max Payload Size to 256 in the Device Control register. */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);

	return 0;
err_power_off_phy:
	/* Power off the lanes brought up so far, then exit every PHY. */
	while (i--)
		phy_power_off(rockchip->phys[i]);
	i = MAX_LANE_NUM;
	while (i--)
		phy_exit(rockchip->phys[i]);
	return err;
}
408
409static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
410{
411 struct rockchip_pcie *rockchip = arg;
412 struct device *dev = rockchip->dev;
413 u32 reg;
414 u32 sub_reg;
415
416 reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
417 if (reg & PCIE_CLIENT_INT_LOCAL) {
418 dev_dbg(dev, "local interrupt received\n");
419 sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
420 if (sub_reg & PCIE_CORE_INT_PRFPE)
421 dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
422
423 if (sub_reg & PCIE_CORE_INT_CRFPE)
424 dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
425
426 if (sub_reg & PCIE_CORE_INT_RRPE)
427 dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
428
429 if (sub_reg & PCIE_CORE_INT_PRFO)
430 dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
431
432 if (sub_reg & PCIE_CORE_INT_CRFO)
433 dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
434
435 if (sub_reg & PCIE_CORE_INT_RT)
436 dev_dbg(dev, "replay timer timed out\n");
437
438 if (sub_reg & PCIE_CORE_INT_RTR)
439 dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
440
441 if (sub_reg & PCIE_CORE_INT_PE)
442 dev_dbg(dev, "phy error detected on receive side\n");
443
444 if (sub_reg & PCIE_CORE_INT_MTR)
445 dev_dbg(dev, "malformed TLP received from the link\n");
446
447 if (sub_reg & PCIE_CORE_INT_UCR)
448 dev_dbg(dev, "malformed TLP received from the link\n");
449
450 if (sub_reg & PCIE_CORE_INT_FCE)
451 dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
452
453 if (sub_reg & PCIE_CORE_INT_CT)
454 dev_dbg(dev, "a request timed out waiting for completion\n");
455
456 if (sub_reg & PCIE_CORE_INT_UTC)
457 dev_dbg(dev, "unmapped TC error\n");
458
459 if (sub_reg & PCIE_CORE_INT_MMVC)
460 dev_dbg(dev, "MSI mask register changes\n");
461
462 rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
463 } else if (reg & PCIE_CLIENT_INT_PHY) {
464 dev_dbg(dev, "phy link changes\n");
465 rockchip_pcie_update_txcredit_mui(rockchip);
466 rockchip_pcie_clr_bw_int(rockchip);
467 }
468
469 rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
470 PCIE_CLIENT_INT_STATUS);
471
472 return IRQ_HANDLED;
473}
474
475static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
476{
477 struct rockchip_pcie *rockchip = arg;
478 struct device *dev = rockchip->dev;
479 u32 reg;
480
481 reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
482 if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
483 dev_dbg(dev, "legacy done interrupt received\n");
484
485 if (reg & PCIE_CLIENT_INT_MSG)
486 dev_dbg(dev, "message done interrupt received\n");
487
488 if (reg & PCIE_CLIENT_INT_HOT_RST)
489 dev_dbg(dev, "hot reset interrupt received\n");
490
491 if (reg & PCIE_CLIENT_INT_DPA)
492 dev_dbg(dev, "dpa interrupt received\n");
493
494 if (reg & PCIE_CLIENT_INT_FATAL_ERR)
495 dev_dbg(dev, "fatal error interrupt received\n");
496
497 if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
498 dev_dbg(dev, "no fatal error interrupt received\n");
499
500 if (reg & PCIE_CLIENT_INT_CORR_ERR)
501 dev_dbg(dev, "correctable error interrupt received\n");
502
503 if (reg & PCIE_CLIENT_INT_PHY)
504 dev_dbg(dev, "phy interrupt received\n");
505
506 rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
507 PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
508 PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
509 PCIE_CLIENT_INT_NFATAL_ERR |
510 PCIE_CLIENT_INT_CORR_ERR |
511 PCIE_CLIENT_INT_PHY),
512 PCIE_CLIENT_INT_STATUS);
513
514 return IRQ_HANDLED;
515}
516
517static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
518{
519 struct irq_chip *chip = irq_desc_get_chip(desc);
520 struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
521 struct device *dev = rockchip->dev;
522 u32 reg;
523 u32 hwirq;
524 u32 virq;
525
526 chained_irq_enter(chip, desc);
527
528 reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
529 reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
530
531 while (reg) {
532 hwirq = ffs(reg) - 1;
533 reg &= ~BIT(hwirq);
534
535 virq = irq_find_mapping(rockchip->irq_domain, hwirq);
536 if (virq)
537 generic_handle_irq(virq);
538 else
539 dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
540 }
541
542 chained_irq_exit(chip, desc);
543}
544
545static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
546{
547 int irq, err;
548 struct device *dev = rockchip->dev;
549 struct platform_device *pdev = to_platform_device(dev);
550
551 irq = platform_get_irq_byname(pdev, "sys");
552 if (irq < 0) {
553 dev_err(dev, "missing sys IRQ resource\n");
554 return irq;
555 }
556
557 err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
558 IRQF_SHARED, "pcie-sys", rockchip);
559 if (err) {
560 dev_err(dev, "failed to request PCIe subsystem IRQ\n");
561 return err;
562 }
563
564 irq = platform_get_irq_byname(pdev, "legacy");
565 if (irq < 0) {
566 dev_err(dev, "missing legacy IRQ resource\n");
567 return irq;
568 }
569
570 irq_set_chained_handler_and_data(irq,
571 rockchip_pcie_legacy_int_handler,
572 rockchip);
573
574 irq = platform_get_irq_byname(pdev, "client");
575 if (irq < 0) {
576 dev_err(dev, "missing client IRQ resource\n");
577 return irq;
578 }
579
580 err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
581 IRQF_SHARED, "pcie-client", rockchip);
582 if (err) {
583 dev_err(dev, "failed to request PCIe client IRQ\n");
584 return err;
585 }
586
587 return 0;
588}
589
/**
 * rockchip_pcie_parse_host_dt - Parse Device Tree
 * @rockchip: PCIe port information
 *
 * Parses the common DT properties (rockchip_pcie_parse_dt), sets up the
 * three platform IRQs, and looks up the four optional power supplies.
 * A missing regulator is logged but not fatal; -EPROBE_DEFER from the
 * regulator core is propagated so the probe can be retried.
 *
 * Return: '0' on success and error value on failure
 */
static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_setup_irq(rockchip);
	if (err)
		return err;

	/*
	 * All supplies are optional.  On any failure other than
	 * -EPROBE_DEFER the IS_ERR value is left in place and later
	 * enable/disable paths skip the regulator via IS_ERR() checks.
	 */
	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
	if (IS_ERR(rockchip->vpcie12v)) {
		if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "no vpcie12v regulator found\n");
	}

	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
	if (IS_ERR(rockchip->vpcie3v3)) {
		if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "no vpcie3v3 regulator found\n");
	}

	rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
	if (IS_ERR(rockchip->vpcie1v8)) {
		if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "no vpcie1v8 regulator found\n");
	}

	rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
	if (IS_ERR(rockchip->vpcie0v9)) {
		if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "no vpcie0v9 regulator found\n");
	}

	return 0;
}
639
640static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
641{
642 struct device *dev = rockchip->dev;
643 int err;
644
645 if (!IS_ERR(rockchip->vpcie12v)) {
646 err = regulator_enable(rockchip->vpcie12v);
647 if (err) {
648 dev_err(dev, "fail to enable vpcie12v regulator\n");
649 goto err_out;
650 }
651 }
652
653 if (!IS_ERR(rockchip->vpcie3v3)) {
654 err = regulator_enable(rockchip->vpcie3v3);
655 if (err) {
656 dev_err(dev, "fail to enable vpcie3v3 regulator\n");
657 goto err_disable_12v;
658 }
659 }
660
661 if (!IS_ERR(rockchip->vpcie1v8)) {
662 err = regulator_enable(rockchip->vpcie1v8);
663 if (err) {
664 dev_err(dev, "fail to enable vpcie1v8 regulator\n");
665 goto err_disable_3v3;
666 }
667 }
668
669 if (!IS_ERR(rockchip->vpcie0v9)) {
670 err = regulator_enable(rockchip->vpcie0v9);
671 if (err) {
672 dev_err(dev, "fail to enable vpcie0v9 regulator\n");
673 goto err_disable_1v8;
674 }
675 }
676
677 return 0;
678
679err_disable_1v8:
680 if (!IS_ERR(rockchip->vpcie1v8))
681 regulator_disable(rockchip->vpcie1v8);
682err_disable_3v3:
683 if (!IS_ERR(rockchip->vpcie3v3))
684 regulator_disable(rockchip->vpcie3v3);
685err_disable_12v:
686 if (!IS_ERR(rockchip->vpcie12v))
687 regulator_disable(rockchip->vpcie12v);
688err_out:
689 return err;
690}
691
692static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
693{
694 rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
695 (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
696 rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
697 PCIE_CORE_INT_MASK);
698
699 rockchip_pcie_enable_bw_int(rockchip);
700}
701
/*
 * irq_domain .map callback for the legacy INTx domain: attach the
 * dummy irq_chip with the simple-IRQ flow and stash the port as
 * chip data.
 */
static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
710
/* Ops for the INTA-INTD IRQ domain; only .map is needed */
static const struct irq_domain_ops intx_domain_ops = {
	.map = rockchip_pcie_intx_map,
};
714
715static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
716{
717 struct device *dev = rockchip->dev;
718 struct device_node *intc = of_get_next_child(dev->of_node, NULL);
719
720 if (!intc) {
721 dev_err(dev, "missing child interrupt-controller node\n");
722 return -EINVAL;
723 }
724
725 rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
726 &intx_domain_ops, rockchip);
727 if (!rockchip->irq_domain) {
728 dev_err(dev, "failed to get a INTx IRQ domain\n");
729 return -EINVAL;
730 }
731
732 return 0;
733}
734
735static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
736 int region_no, int type, u8 num_pass_bits,
737 u32 lower_addr, u32 upper_addr)
738{
739 u32 ob_addr_0;
740 u32 ob_addr_1;
741 u32 ob_desc_0;
742 u32 aw_offset;
743
744 if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
745 return -EINVAL;
746 if (num_pass_bits + 1 < 8)
747 return -EINVAL;
748 if (num_pass_bits > 63)
749 return -EINVAL;
750 if (region_no == 0) {
751 if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
752 return -EINVAL;
753 }
754 if (region_no != 0) {
755 if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
756 return -EINVAL;
757 }
758
759 aw_offset = (region_no << OB_REG_SIZE_SHIFT);
760
761 ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
762 ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
763 ob_addr_1 = upper_addr;
764 ob_desc_0 = (1 << 23 | type);
765
766 rockchip_pcie_write(rockchip, ob_addr_0,
767 PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
768 rockchip_pcie_write(rockchip, ob_addr_1,
769 PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
770 rockchip_pcie_write(rockchip, ob_desc_0,
771 PCIE_CORE_OB_REGION_DESC0 + aw_offset);
772 rockchip_pcie_write(rockchip, 0,
773 PCIE_CORE_OB_REGION_DESC1 + aw_offset);
774
775 return 0;
776}
777
778static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
779 int region_no, u8 num_pass_bits,
780 u32 lower_addr, u32 upper_addr)
781{
782 u32 ib_addr_0;
783 u32 ib_addr_1;
784 u32 aw_offset;
785
786 if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
787 return -EINVAL;
788 if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
789 return -EINVAL;
790 if (num_pass_bits > 63)
791 return -EINVAL;
792
793 aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
794
795 ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
796 ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
797 ib_addr_1 = upper_addr;
798
799 rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
800 rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
801
802 return 0;
803}
804
805static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
806{
807 struct device *dev = rockchip->dev;
808 int offset;
809 int err;
810 int reg_no;
811
812 rockchip_pcie_cfg_configuration_accesses(rockchip,
813 AXI_WRAPPER_TYPE0_CFG);
814
815 for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
816 err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
817 AXI_WRAPPER_MEM_WRITE,
818 20 - 1,
819 rockchip->mem_bus_addr +
820 (reg_no << 20),
821 0);
822 if (err) {
823 dev_err(dev, "program RC mem outbound ATU failed\n");
824 return err;
825 }
826 }
827
828 err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
829 if (err) {
830 dev_err(dev, "program RC mem inbound ATU failed\n");
831 return err;
832 }
833
834 offset = rockchip->mem_size >> 20;
835 for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
836 err = rockchip_pcie_prog_ob_atu(rockchip,
837 reg_no + 1 + offset,
838 AXI_WRAPPER_IO_WRITE,
839 20 - 1,
840 rockchip->io_bus_addr +
841 (reg_no << 20),
842 0);
843 if (err) {
844 dev_err(dev, "program RC io outbound ATU failed\n");
845 return err;
846 }
847 }
848
849 /* assign message regions */
850 rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
851 AXI_WRAPPER_NOR_MSG,
852 20 - 1, 0, 0);
853
854 rockchip->msg_bus_addr = rockchip->mem_bus_addr +
855 ((reg_no + offset) << 20);
856 return err;
857}
858
859static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
860{
861 u32 value;
862 int err;
863
864 /* send PME_TURN_OFF message */
865 writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
866
867 /* read LTSSM and wait for falling into L2 link state */
868 err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
869 value, PCIE_LINK_IS_L2(value), 20,
870 jiffies_to_usecs(5 * HZ));
871 if (err) {
872 dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
873 return err;
874 }
875
876 return 0;
877}
878
/*
 * System-sleep (noirq phase) suspend: mask interrupts, ask the link to
 * enter L2, then power down the PHYs, clocks and the 0.9V supply.
 * If the link refuses to enter L2, interrupts are re-enabled and the
 * suspend is aborted.
 */
static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int ret;

	/* disable core and cli int since we don't need to ack PME_ACK */
	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
	rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);

	ret = rockchip_pcie_wait_l2(rockchip);
	if (ret) {
		/* Link never reached L2 — undo the masking and bail out */
		rockchip_pcie_enable_interrupts(rockchip);
		return ret;
	}

	rockchip_pcie_deinit_phys(rockchip);

	rockchip_pcie_disable_clocks(rockchip);

	/* Only the 0.9V rail is managed across suspend/resume */
	if (!IS_ERR(rockchip->vpcie0v9))
		regulator_disable(rockchip->vpcie0v9);

	return ret;
}
904
905static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
906{
907 struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
908 int err;
909
910 if (!IS_ERR(rockchip->vpcie0v9)) {
911 err = regulator_enable(rockchip->vpcie0v9);
912 if (err) {
913 dev_err(dev, "fail to enable vpcie0v9 regulator\n");
914 return err;
915 }
916 }
917
918 err = rockchip_pcie_enable_clocks(rockchip);
919 if (err)
920 goto err_disable_0v9;
921
922 err = rockchip_pcie_host_init_port(rockchip);
923 if (err)
924 goto err_pcie_resume;
925
926 err = rockchip_pcie_cfg_atu(rockchip);
927 if (err)
928 goto err_err_deinit_port;
929
930 /* Need this to enter L1 again */
931 rockchip_pcie_update_txcredit_mui(rockchip);
932 rockchip_pcie_enable_interrupts(rockchip);
933
934 return 0;
935
936err_err_deinit_port:
937 rockchip_pcie_deinit_phys(rockchip);
938err_pcie_resume:
939 rockchip_pcie_disable_clocks(rockchip);
940err_disable_0v9:
941 if (!IS_ERR(rockchip->vpcie0v9))
942 regulator_disable(rockchip->vpcie0v9);
943 return err;
944}
945
/*
 * Probe: allocate the host bridge, parse DT (IRQs, regulators), power up
 * the port, create the INTx domain, claim the bridge windows, program the
 * ATUs, then scan and populate the root bus.  The error labels unwind
 * each step in reverse order.
 */
static int rockchip_pcie_probe(struct platform_device *pdev)
{
	struct rockchip_pcie *rockchip;
	struct device *dev = &pdev->dev;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource_entry *win;
	resource_size_t io_base;
	struct resource *mem;
	struct resource *io;
	int err;

	LIST_HEAD(res);

	if (!dev->of_node)
		return -ENODEV;

	/* Driver state lives in the bridge's private area */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
	if (!bridge)
		return -ENOMEM;

	rockchip = pci_host_bridge_priv(bridge);

	platform_set_drvdata(pdev, rockchip);
	rockchip->dev = dev;
	rockchip->is_rc = true;

	err = rockchip_pcie_parse_host_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_set_vpcie(rockchip);
	if (err) {
		dev_err(dev, "failed to set vpcie regulator\n");
		goto err_set_vpcie;
	}

	err = rockchip_pcie_host_init_port(rockchip);
	if (err)
		goto err_vpcie;

	rockchip_pcie_enable_interrupts(rockchip);

	err = rockchip_pcie_init_irq_domain(rockchip);
	if (err < 0)
		goto err_deinit_port;

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    &res, &io_base);
	if (err)
		goto err_remove_irq_domain;

	err = devm_request_pci_bus_resources(dev, &res);
	if (err)
		goto err_free_res;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			io = win->res;
			io->name = "I/O";
			rockchip->io_size = resource_size(io);
			rockchip->io_bus_addr = io->start - win->offset;
			err = pci_remap_iospace(io, io_base);
			if (err) {
				/* Non-fatal: continue without this window */
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, io);
				continue;
			}
			rockchip->io = io;
			break;
		case IORESOURCE_MEM:
			mem = win->res;
			mem->name = "MEM";
			rockchip->mem_size = resource_size(mem);
			rockchip->mem_bus_addr = mem->start - win->offset;
			break;
		case IORESOURCE_BUS:
			rockchip->root_bus_nr = win->res->start;
			break;
		default:
			continue;
		}
	}

	/* Window sizes are known now — program the translation units */
	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		goto err_unmap_iospace;

	/* Map the 1MB message region used e.g. for PME_Turn_Off */
	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
	if (!rockchip->msg_region) {
		err = -ENOMEM;
		goto err_unmap_iospace;
	}

	list_splice_init(&res, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = rockchip;
	bridge->busnr = 0;
	bridge->ops = &rockchip_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(bridge);
	if (err < 0)
		goto err_unmap_iospace;

	bus = bridge->bus;

	rockchip->root_bus = bus;

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

err_unmap_iospace:
	pci_unmap_iospace(rockchip->io);
err_free_res:
	pci_free_resource_list(&res);
err_remove_irq_domain:
	irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_vpcie:
	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	if (!IS_ERR(rockchip->vpcie1v8))
		regulator_disable(rockchip->vpcie1v8);
	if (!IS_ERR(rockchip->vpcie0v9))
		regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
	rockchip_pcie_disable_clocks(rockchip);
	return err;
}
1091
1092static int rockchip_pcie_remove(struct platform_device *pdev)
1093{
1094 struct device *dev = &pdev->dev;
1095 struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
1096
1097 pci_stop_root_bus(rockchip->root_bus);
1098 pci_remove_root_bus(rockchip->root_bus);
1099 pci_unmap_iospace(rockchip->io);
1100 irq_domain_remove(rockchip->irq_domain);
1101
1102 rockchip_pcie_deinit_phys(rockchip);
1103
1104 rockchip_pcie_disable_clocks(rockchip);
1105
1106 if (!IS_ERR(rockchip->vpcie12v))
1107 regulator_disable(rockchip->vpcie12v);
1108 if (!IS_ERR(rockchip->vpcie3v3))
1109 regulator_disable(rockchip->vpcie3v3);
1110 if (!IS_ERR(rockchip->vpcie1v8))
1111 regulator_disable(rockchip->vpcie1v8);
1112 if (!IS_ERR(rockchip->vpcie0v9))
1113 regulator_disable(rockchip->vpcie0v9);
1114
1115 return 0;
1116}
1117
/* System-sleep callbacks registered for the noirq phase */
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
				      rockchip_pcie_resume_noirq)
};
1122
/* Devices handled by this driver (RK3399 root complex) */
static const struct of_device_id rockchip_pcie_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie", },
	{}
};
MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
1128
/* Platform driver glue and module metadata */
static struct platform_driver rockchip_pcie_driver = {
	.driver = {
		.name = "rockchip-pcie",
		.of_match_table = rockchip_pcie_of_match,
		.pm = &rockchip_pcie_pm_ops,
	},
	.probe = rockchip_pcie_probe,
	.remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Rockchip Inc");
MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index f1e8f97ea1fb..c53d1322a3d6 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -11,535 +11,154 @@
11 * ARM PCI Host generic driver. 11 * ARM PCI Host generic driver.
12 */ 12 */
13 13
14#include <linux/bitrev.h>
15#include <linux/clk.h> 14#include <linux/clk.h>
16#include <linux/delay.h> 15#include <linux/delay.h>
17#include <linux/gpio/consumer.h> 16#include <linux/gpio/consumer.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/iopoll.h>
21#include <linux/irq.h>
22#include <linux/irqchip/chained_irq.h>
23#include <linux/irqdomain.h>
24#include <linux/kernel.h>
25#include <linux/mfd/syscon.h>
26#include <linux/module.h>
27#include <linux/of_address.h>
28#include <linux/of_device.h>
29#include <linux/of_pci.h> 17#include <linux/of_pci.h>
30#include <linux/of_platform.h>
31#include <linux/of_irq.h>
32#include <linux/pci.h>
33#include <linux/pci_ids.h>
34#include <linux/phy/phy.h> 18#include <linux/phy/phy.h>
35#include <linux/platform_device.h> 19#include <linux/platform_device.h>
36#include <linux/reset.h> 20#include <linux/reset.h>
37#include <linux/regmap.h>
38 21
39/* 22#include "../pci.h"
40 * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 23#include "pcie-rockchip.h"
41 * bits. This allows atomic updates of the register without locking.
42 */
43#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
44#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
45
46#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
47#define MAX_LANE_NUM 4
48
49#define PCIE_CLIENT_BASE 0x0
50#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
51#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
52#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
53#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
54#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
55#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
56#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
57#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
58#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
59#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
60#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
61#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
62#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
63#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
64#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
65#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
66#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
67#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
68#define PCIE_CLIENT_INTR_SHIFT 5
69#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
70#define PCIE_CLIENT_INT_MSG BIT(14)
71#define PCIE_CLIENT_INT_HOT_RST BIT(13)
72#define PCIE_CLIENT_INT_DPA BIT(12)
73#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
74#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
75#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
76#define PCIE_CLIENT_INT_INTD BIT(8)
77#define PCIE_CLIENT_INT_INTC BIT(7)
78#define PCIE_CLIENT_INT_INTB BIT(6)
79#define PCIE_CLIENT_INT_INTA BIT(5)
80#define PCIE_CLIENT_INT_LOCAL BIT(4)
81#define PCIE_CLIENT_INT_UDMA BIT(3)
82#define PCIE_CLIENT_INT_PHY BIT(2)
83#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
84#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
85
86#define PCIE_CLIENT_INT_LEGACY \
87 (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
88 PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
89
90#define PCIE_CLIENT_INT_CLI \
91 (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
92 PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
93 PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
94 PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
95 PCIE_CLIENT_INT_PHY)
96
97#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
98#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
99#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
100#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
101#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
102#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
103#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
104#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
105#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
106#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
107#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
108#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
109#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
110#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
111 (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
112#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
113#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
114#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
115#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
116#define PCIE_CORE_INT_PRFPE BIT(0)
117#define PCIE_CORE_INT_CRFPE BIT(1)
118#define PCIE_CORE_INT_RRPE BIT(2)
119#define PCIE_CORE_INT_PRFO BIT(3)
120#define PCIE_CORE_INT_CRFO BIT(4)
121#define PCIE_CORE_INT_RT BIT(5)
122#define PCIE_CORE_INT_RTR BIT(6)
123#define PCIE_CORE_INT_PE BIT(7)
124#define PCIE_CORE_INT_MTR BIT(8)
125#define PCIE_CORE_INT_UCR BIT(9)
126#define PCIE_CORE_INT_FCE BIT(10)
127#define PCIE_CORE_INT_CT BIT(11)
128#define PCIE_CORE_INT_UTC BIT(18)
129#define PCIE_CORE_INT_MMVC BIT(19)
130#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
131#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
132#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
133
134#define PCIE_CORE_INT \
135 (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
136 PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
137 PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
138 PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
139 PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
140 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
141 PCIE_CORE_INT_MMVC)
142
143#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
144#define PCIE_RC_CONFIG_BASE 0xa00000
145#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
146#define PCIE_RC_CONFIG_SCC_SHIFT 16
147#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
148#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
149#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
150#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
151#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
152#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
153#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
154#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
155#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
156#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
157#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
158#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
159#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
160
161#define PCIE_CORE_AXI_CONF_BASE 0xc00000
162#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
163#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
164#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
165#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
166#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
167#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
168
169#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
170#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
171#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
172#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
173#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
174
175/* Size of one AXI Region (not Region 0) */
176#define AXI_REGION_SIZE BIT(20)
177/* Size of Region 0, equal to sum of sizes of other regions */
178#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
179#define OB_REG_SIZE_SHIFT 5
180#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
181#define AXI_WRAPPER_IO_WRITE 0x6
182#define AXI_WRAPPER_MEM_WRITE 0x2
183#define AXI_WRAPPER_TYPE0_CFG 0xa
184#define AXI_WRAPPER_TYPE1_CFG 0xb
185#define AXI_WRAPPER_NOR_MSG 0xc
186
187#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
188#define MIN_AXI_ADDR_BITS_PASSED 8
189#define PCIE_RC_SEND_PME_OFF 0x11960
190#define ROCKCHIP_VENDOR_ID 0x1d87
191#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
192#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
193#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
194#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
195#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
196 (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
197 PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
198#define PCIE_LINK_IS_L2(x) \
199 (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
200#define PCIE_LINK_UP(x) \
201 (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
202#define PCIE_LINK_IS_GEN2(x) \
203 (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
204
205#define RC_REGION_0_ADDR_TRANS_H 0x00000000
206#define RC_REGION_0_ADDR_TRANS_L 0x00000000
207#define RC_REGION_0_PASS_BITS (25 - 1)
208#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
209#define MAX_AXI_WRAPPER_REGION_NUM 33
210
211struct rockchip_pcie {
212 void __iomem *reg_base; /* DT axi-base */
213 void __iomem *apb_base; /* DT apb-base */
214 bool legacy_phy;
215 struct phy *phys[MAX_LANE_NUM];
216 struct reset_control *core_rst;
217 struct reset_control *mgmt_rst;
218 struct reset_control *mgmt_sticky_rst;
219 struct reset_control *pipe_rst;
220 struct reset_control *pm_rst;
221 struct reset_control *aclk_rst;
222 struct reset_control *pclk_rst;
223 struct clk *aclk_pcie;
224 struct clk *aclk_perf_pcie;
225 struct clk *hclk_pcie;
226 struct clk *clk_pcie_pm;
227 struct regulator *vpcie12v; /* 12V power supply */
228 struct regulator *vpcie3v3; /* 3.3V power supply */
229 struct regulator *vpcie1v8; /* 1.8V power supply */
230 struct regulator *vpcie0v9; /* 0.9V power supply */
231 struct gpio_desc *ep_gpio;
232 u32 lanes;
233 u8 lanes_map;
234 u8 root_bus_nr;
235 int link_gen;
236 struct device *dev;
237 struct irq_domain *irq_domain;
238 int offset;
239 struct pci_bus *root_bus;
240 struct resource *io;
241 phys_addr_t io_bus_addr;
242 u32 io_size;
243 void __iomem *msg_region;
244 u32 mem_size;
245 phys_addr_t msg_bus_addr;
246 phys_addr_t mem_bus_addr;
247};
248
249static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
250{
251 return readl(rockchip->apb_base + reg);
252}
253
254static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
255 u32 reg)
256{
257 writel(val, rockchip->apb_base + reg);
258}
259
260static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
261{
262 u32 status;
263
264 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
265 status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
266 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
267}
268
269static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
270{
271 u32 status;
272
273 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
274 status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
275 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
276}
277
278static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
279{
280 u32 val;
281
282 /* Update Tx credit maximum update interval */
283 val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
284 val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
285 val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
286 rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
287}
288 24
289static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, 25int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
290 struct pci_bus *bus, int dev)
291{ 26{
292 /* access only one slot on each root port */ 27 struct device *dev = rockchip->dev;
293 if (bus->number == rockchip->root_bus_nr && dev > 0) 28 struct platform_device *pdev = to_platform_device(dev);
294 return 0; 29 struct device_node *node = dev->of_node;
295 30 struct resource *regs;
296 /* 31 int err;
297 * do not read more than one device on the bus directly attached
298 * to RC's downstream side.
299 */
300 if (bus->primary == rockchip->root_bus_nr && dev > 0)
301 return 0;
302
303 return 1;
304}
305
306static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
307{
308 u32 val;
309 u8 map;
310
311 if (rockchip->legacy_phy)
312 return GENMASK(MAX_LANE_NUM - 1, 0);
313
314 val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
315 map = val & PCIE_CORE_LANE_MAP_MASK;
316
317 /* The link may be using a reverse-indexed mapping. */
318 if (val & PCIE_CORE_LANE_MAP_REVERSE)
319 map = bitrev8(map) >> 4;
320
321 return map;
322}
323
324static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
325 int where, int size, u32 *val)
326{
327 void __iomem *addr;
328
329 addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
330
331 if (!IS_ALIGNED((uintptr_t)addr, size)) {
332 *val = 0;
333 return PCIBIOS_BAD_REGISTER_NUMBER;
334 }
335 32
336 if (size == 4) { 33 if (rockchip->is_rc) {
337 *val = readl(addr); 34 regs = platform_get_resource_byname(pdev,
338 } else if (size == 2) { 35 IORESOURCE_MEM,
339 *val = readw(addr); 36 "axi-base");
340 } else if (size == 1) { 37 rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
341 *val = readb(addr); 38 if (IS_ERR(rockchip->reg_base))
39 return PTR_ERR(rockchip->reg_base);
342 } else { 40 } else {
343 *val = 0; 41 rockchip->mem_res =
344 return PCIBIOS_BAD_REGISTER_NUMBER; 42 platform_get_resource_byname(pdev, IORESOURCE_MEM,
43 "mem-base");
44 if (!rockchip->mem_res)
45 return -EINVAL;
345 } 46 }
346 return PCIBIOS_SUCCESSFUL;
347}
348 47
349static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, 48 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
350 int where, int size, u32 val) 49 "apb-base");
351{ 50 rockchip->apb_base = devm_ioremap_resource(dev, regs);
352 u32 mask, tmp, offset; 51 if (IS_ERR(rockchip->apb_base))
353 void __iomem *addr; 52 return PTR_ERR(rockchip->apb_base);
354 53
355 offset = where & ~0x3; 54 err = rockchip_pcie_get_phys(rockchip);
356 addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; 55 if (err)
56 return err;
357 57
358 if (size == 4) { 58 rockchip->lanes = 1;
359 writel(val, addr); 59 err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
360 return PCIBIOS_SUCCESSFUL; 60 if (!err && (rockchip->lanes == 0 ||
61 rockchip->lanes == 3 ||
62 rockchip->lanes > 4)) {
63 dev_warn(dev, "invalid num-lanes, default to use one lane\n");
64 rockchip->lanes = 1;
361 } 65 }
362 66
363 mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); 67 rockchip->link_gen = of_pci_get_max_link_speed(node);
364 68 if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
365 /* 69 rockchip->link_gen = 2;
366 * N.B. This read/modify/write isn't safe in general because it can
367 * corrupt RW1C bits in adjacent registers. But the hardware
368 * doesn't support smaller writes.
369 */
370 tmp = readl(addr) & mask;
371 tmp |= val << ((where & 0x3) * 8);
372 writel(tmp, addr);
373
374 return PCIBIOS_SUCCESSFUL;
375}
376
377static void rockchip_pcie_cfg_configuration_accesses(
378 struct rockchip_pcie *rockchip, u32 type)
379{
380 u32 ob_desc_0;
381
382 /* Configuration Accesses for region 0 */
383 rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
384
385 rockchip_pcie_write(rockchip,
386 (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
387 PCIE_CORE_OB_REGION_ADDR0);
388 rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
389 PCIE_CORE_OB_REGION_ADDR1);
390 ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
391 ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
392 ob_desc_0 |= (type | (0x1 << 23));
393 rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
394 rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
395}
396
397static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
398 struct pci_bus *bus, u32 devfn,
399 int where, int size, u32 *val)
400{
401 u32 busdev;
402
403 busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
404 PCI_FUNC(devfn), where);
405 70
406 if (!IS_ALIGNED(busdev, size)) { 71 rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
407 *val = 0; 72 if (IS_ERR(rockchip->core_rst)) {
408 return PCIBIOS_BAD_REGISTER_NUMBER; 73 if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
74 dev_err(dev, "missing core reset property in node\n");
75 return PTR_ERR(rockchip->core_rst);
409 } 76 }
410 77
411 if (bus->parent->number == rockchip->root_bus_nr) 78 rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
412 rockchip_pcie_cfg_configuration_accesses(rockchip, 79 if (IS_ERR(rockchip->mgmt_rst)) {
413 AXI_WRAPPER_TYPE0_CFG); 80 if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
414 else 81 dev_err(dev, "missing mgmt reset property in node\n");
415 rockchip_pcie_cfg_configuration_accesses(rockchip, 82 return PTR_ERR(rockchip->mgmt_rst);
416 AXI_WRAPPER_TYPE1_CFG);
417
418 if (size == 4) {
419 *val = readl(rockchip->reg_base + busdev);
420 } else if (size == 2) {
421 *val = readw(rockchip->reg_base + busdev);
422 } else if (size == 1) {
423 *val = readb(rockchip->reg_base + busdev);
424 } else {
425 *val = 0;
426 return PCIBIOS_BAD_REGISTER_NUMBER;
427 } 83 }
428 return PCIBIOS_SUCCESSFUL;
429}
430
431static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
432 struct pci_bus *bus, u32 devfn,
433 int where, int size, u32 val)
434{
435 u32 busdev;
436
437 busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
438 PCI_FUNC(devfn), where);
439 if (!IS_ALIGNED(busdev, size))
440 return PCIBIOS_BAD_REGISTER_NUMBER;
441
442 if (bus->parent->number == rockchip->root_bus_nr)
443 rockchip_pcie_cfg_configuration_accesses(rockchip,
444 AXI_WRAPPER_TYPE0_CFG);
445 else
446 rockchip_pcie_cfg_configuration_accesses(rockchip,
447 AXI_WRAPPER_TYPE1_CFG);
448
449 if (size == 4)
450 writel(val, rockchip->reg_base + busdev);
451 else if (size == 2)
452 writew(val, rockchip->reg_base + busdev);
453 else if (size == 1)
454 writeb(val, rockchip->reg_base + busdev);
455 else
456 return PCIBIOS_BAD_REGISTER_NUMBER;
457
458 return PCIBIOS_SUCCESSFUL;
459}
460 84
461static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, 85 rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
462 int size, u32 *val) 86 "mgmt-sticky");
463{ 87 if (IS_ERR(rockchip->mgmt_sticky_rst)) {
464 struct rockchip_pcie *rockchip = bus->sysdata; 88 if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
465 89 dev_err(dev, "missing mgmt-sticky reset property in node\n");
466 if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) { 90 return PTR_ERR(rockchip->mgmt_sticky_rst);
467 *val = 0xffffffff;
468 return PCIBIOS_DEVICE_NOT_FOUND;
469 } 91 }
470 92
471 if (bus->number == rockchip->root_bus_nr) 93 rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
472 return rockchip_pcie_rd_own_conf(rockchip, where, size, val); 94 if (IS_ERR(rockchip->pipe_rst)) {
473 95 if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
474 return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val); 96 dev_err(dev, "missing pipe reset property in node\n");
475} 97 return PTR_ERR(rockchip->pipe_rst);
98 }
476 99
477static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, 100 rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
478 int where, int size, u32 val) 101 if (IS_ERR(rockchip->pm_rst)) {
479{ 102 if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
480 struct rockchip_pcie *rockchip = bus->sysdata; 103 dev_err(dev, "missing pm reset property in node\n");
104 return PTR_ERR(rockchip->pm_rst);
105 }
481 106
482 if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) 107 rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
483 return PCIBIOS_DEVICE_NOT_FOUND; 108 if (IS_ERR(rockchip->pclk_rst)) {
109 if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
110 dev_err(dev, "missing pclk reset property in node\n");
111 return PTR_ERR(rockchip->pclk_rst);
112 }
484 113
485 if (bus->number == rockchip->root_bus_nr) 114 rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
486 return rockchip_pcie_wr_own_conf(rockchip, where, size, val); 115 if (IS_ERR(rockchip->aclk_rst)) {
116 if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
117 dev_err(dev, "missing aclk reset property in node\n");
118 return PTR_ERR(rockchip->aclk_rst);
119 }
487 120
488 return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val); 121 if (rockchip->is_rc) {
489} 122 rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
123 if (IS_ERR(rockchip->ep_gpio)) {
124 dev_err(dev, "missing ep-gpios property in node\n");
125 return PTR_ERR(rockchip->ep_gpio);
126 }
127 }
490 128
491static struct pci_ops rockchip_pcie_ops = { 129 rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
492 .read = rockchip_pcie_rd_conf, 130 if (IS_ERR(rockchip->aclk_pcie)) {
493 .write = rockchip_pcie_wr_conf, 131 dev_err(dev, "aclk clock not found\n");
494}; 132 return PTR_ERR(rockchip->aclk_pcie);
133 }
495 134
496static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) 135 rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
497{ 136 if (IS_ERR(rockchip->aclk_perf_pcie)) {
498 int curr; 137 dev_err(dev, "aclk_perf clock not found\n");
499 u32 status, scale, power; 138 return PTR_ERR(rockchip->aclk_perf_pcie);
139 }
500 140
501 if (IS_ERR(rockchip->vpcie3v3)) 141 rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
502 return; 142 if (IS_ERR(rockchip->hclk_pcie)) {
143 dev_err(dev, "hclk clock not found\n");
144 return PTR_ERR(rockchip->hclk_pcie);
145 }
503 146
504 /* 147 rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
505 * Set RC's captured slot power limit and scale if 148 if (IS_ERR(rockchip->clk_pcie_pm)) {
506 * vpcie3v3 available. The default values are both zero 149 dev_err(dev, "pm clock not found\n");
507 * which means the software should set these two according 150 return PTR_ERR(rockchip->clk_pcie_pm);
508 * to the actual power supply.
509 */
510 curr = regulator_get_current_limit(rockchip->vpcie3v3);
511 if (curr <= 0)
512 return;
513
514 scale = 3; /* 0.001x */
515 curr = curr / 1000; /* convert to mA */
516 power = (curr * 3300) / 1000; /* milliwatt */
517 while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
518 if (!scale) {
519 dev_warn(rockchip->dev, "invalid power supply\n");
520 return;
521 }
522 scale--;
523 power = power / 10;
524 } 151 }
525 152
526 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); 153 return 0;
527 status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
528 (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
529 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
530} 154}
155EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
531 156
532/** 157int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
533 * rockchip_pcie_init_port - Initialize hardware
534 * @rockchip: PCIe port information
535 */
536static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
537{ 158{
538 struct device *dev = rockchip->dev; 159 struct device *dev = rockchip->dev;
539 int err, i; 160 int err, i;
540 u32 status; 161 u32 regs;
541
542 gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
543 162
544 err = reset_control_assert(rockchip->aclk_rst); 163 err = reset_control_assert(rockchip->aclk_rst);
545 if (err) { 164 if (err) {
@@ -618,13 +237,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
618 rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, 237 rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
619 PCIE_CLIENT_CONFIG); 238 PCIE_CLIENT_CONFIG);
620 239
621 rockchip_pcie_write(rockchip, 240 regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
622 PCIE_CLIENT_CONF_ENABLE | 241 PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
623 PCIE_CLIENT_LINK_TRAIN_ENABLE | 242
624 PCIE_CLIENT_ARI_ENABLE | 243 if (rockchip->is_rc)
625 PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) | 244 regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
626 PCIE_CLIENT_MODE_RC, 245 else
627 PCIE_CLIENT_CONFIG); 246 regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
247
248 rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
628 249
629 for (i = 0; i < MAX_LANE_NUM; i++) { 250 for (i = 0; i < MAX_LANE_NUM; i++) {
630 err = phy_power_on(rockchip->phys[i]); 251 err = phy_power_on(rockchip->phys[i]);
@@ -662,93 +283,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
662 goto err_power_off_phy; 283 goto err_power_off_phy;
663 } 284 }
664 285
665 /* Fix the transmitted FTS count desired to exit from L0s. */
666 status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
667 status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
668 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
669 rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
670
671 rockchip_pcie_set_power_limit(rockchip);
672
673 /* Set RC's clock architecture as common clock */
674 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
675 status |= PCI_EXP_LNKSTA_SLC << 16;
676 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
677
678 /* Set RC's RCB to 128 */
679 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
680 status |= PCI_EXP_LNKCTL_RCB;
681 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
682
683 /* Enable Gen1 training */
684 rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
685 PCIE_CLIENT_CONFIG);
686
687 gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
688
689 /* 500ms timeout value should be enough for Gen1/2 training */
690 err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
691 status, PCIE_LINK_UP(status), 20,
692 500 * USEC_PER_MSEC);
693 if (err) {
694 dev_err(dev, "PCIe link training gen1 timeout!\n");
695 goto err_power_off_phy;
696 }
697
698 if (rockchip->link_gen == 2) {
699 /*
700 * Enable retrain for gen2. This should be configured only after
701 * gen1 finished.
702 */
703 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
704 status |= PCI_EXP_LNKCTL_RL;
705 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
706
707 err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
708 status, PCIE_LINK_IS_GEN2(status), 20,
709 500 * USEC_PER_MSEC);
710 if (err)
711 dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
712 }
713
714 /* Check the final link width from negotiated lane counter from MGMT */
715 status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
716 status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
717 PCIE_CORE_PL_CONF_LANE_SHIFT);
718 dev_dbg(dev, "current link width is x%d\n", status);
719
720 /* Power off unused lane(s) */
721 rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
722 for (i = 0; i < MAX_LANE_NUM; i++) {
723 if (!(rockchip->lanes_map & BIT(i))) {
724 dev_dbg(dev, "idling lane %d\n", i);
725 phy_power_off(rockchip->phys[i]);
726 }
727 }
728
729 rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
730 PCIE_CORE_CONFIG_VENDOR);
731 rockchip_pcie_write(rockchip,
732 PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
733 PCIE_RC_CONFIG_RID_CCR);
734
735 /* Clear THP cap's next cap pointer to remove L1 substate cap */
736 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
737 status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
738 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
739
740 /* Clear L0s from RC's link cap */
741 if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
742 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
743 status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
744 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
745 }
746
747 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
748 status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
749 status |= PCIE_RC_CONFIG_DCSR_MPS_256;
750 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
751
752 return 0; 286 return 0;
753err_power_off_phy: 287err_power_off_phy:
754 while (i--) 288 while (i--)
@@ -759,156 +293,9 @@ err_exit_phy:
759 phy_exit(rockchip->phys[i]); 293 phy_exit(rockchip->phys[i]);
760 return err; 294 return err;
761} 295}
296EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
762 297
763static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) 298int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
764{
765 int i;
766
767 for (i = 0; i < MAX_LANE_NUM; i++) {
768 /* inactive lanes are already powered off */
769 if (rockchip->lanes_map & BIT(i))
770 phy_power_off(rockchip->phys[i]);
771 phy_exit(rockchip->phys[i]);
772 }
773}
774
775static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
776{
777 struct rockchip_pcie *rockchip = arg;
778 struct device *dev = rockchip->dev;
779 u32 reg;
780 u32 sub_reg;
781
782 reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
783 if (reg & PCIE_CLIENT_INT_LOCAL) {
784 dev_dbg(dev, "local interrupt received\n");
785 sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
786 if (sub_reg & PCIE_CORE_INT_PRFPE)
787 dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
788
789 if (sub_reg & PCIE_CORE_INT_CRFPE)
790 dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
791
792 if (sub_reg & PCIE_CORE_INT_RRPE)
793 dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
794
795 if (sub_reg & PCIE_CORE_INT_PRFO)
796 dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
797
798 if (sub_reg & PCIE_CORE_INT_CRFO)
799 dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
800
801 if (sub_reg & PCIE_CORE_INT_RT)
802 dev_dbg(dev, "replay timer timed out\n");
803
804 if (sub_reg & PCIE_CORE_INT_RTR)
805 dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
806
807 if (sub_reg & PCIE_CORE_INT_PE)
808 dev_dbg(dev, "phy error detected on receive side\n");
809
810 if (sub_reg & PCIE_CORE_INT_MTR)
811 dev_dbg(dev, "malformed TLP received from the link\n");
812
813 if (sub_reg & PCIE_CORE_INT_UCR)
814 dev_dbg(dev, "malformed TLP received from the link\n");
815
816 if (sub_reg & PCIE_CORE_INT_FCE)
817 dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
818
819 if (sub_reg & PCIE_CORE_INT_CT)
820 dev_dbg(dev, "a request timed out waiting for completion\n");
821
822 if (sub_reg & PCIE_CORE_INT_UTC)
823 dev_dbg(dev, "unmapped TC error\n");
824
825 if (sub_reg & PCIE_CORE_INT_MMVC)
826 dev_dbg(dev, "MSI mask register changes\n");
827
828 rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
829 } else if (reg & PCIE_CLIENT_INT_PHY) {
830 dev_dbg(dev, "phy link changes\n");
831 rockchip_pcie_update_txcredit_mui(rockchip);
832 rockchip_pcie_clr_bw_int(rockchip);
833 }
834
835 rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
836 PCIE_CLIENT_INT_STATUS);
837
838 return IRQ_HANDLED;
839}
840
841static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
842{
843 struct rockchip_pcie *rockchip = arg;
844 struct device *dev = rockchip->dev;
845 u32 reg;
846
847 reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
848 if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
849 dev_dbg(dev, "legacy done interrupt received\n");
850
851 if (reg & PCIE_CLIENT_INT_MSG)
852 dev_dbg(dev, "message done interrupt received\n");
853
854 if (reg & PCIE_CLIENT_INT_HOT_RST)
855 dev_dbg(dev, "hot reset interrupt received\n");
856
857 if (reg & PCIE_CLIENT_INT_DPA)
858 dev_dbg(dev, "dpa interrupt received\n");
859
860 if (reg & PCIE_CLIENT_INT_FATAL_ERR)
861 dev_dbg(dev, "fatal error interrupt received\n");
862
863 if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
864 dev_dbg(dev, "no fatal error interrupt received\n");
865
866 if (reg & PCIE_CLIENT_INT_CORR_ERR)
867 dev_dbg(dev, "correctable error interrupt received\n");
868
869 if (reg & PCIE_CLIENT_INT_PHY)
870 dev_dbg(dev, "phy interrupt received\n");
871
872 rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
873 PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
874 PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
875 PCIE_CLIENT_INT_NFATAL_ERR |
876 PCIE_CLIENT_INT_CORR_ERR |
877 PCIE_CLIENT_INT_PHY),
878 PCIE_CLIENT_INT_STATUS);
879
880 return IRQ_HANDLED;
881}
882
883static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
884{
885 struct irq_chip *chip = irq_desc_get_chip(desc);
886 struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
887 struct device *dev = rockchip->dev;
888 u32 reg;
889 u32 hwirq;
890 u32 virq;
891
892 chained_irq_enter(chip, desc);
893
894 reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
895 reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
896
897 while (reg) {
898 hwirq = ffs(reg) - 1;
899 reg &= ~BIT(hwirq);
900
901 virq = irq_find_mapping(rockchip->irq_domain, hwirq);
902 if (virq)
903 generic_handle_irq(virq);
904 else
905 dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
906 }
907
908 chained_irq_exit(chip, desc);
909}
910
911static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
912{ 299{
913 struct device *dev = rockchip->dev; 300 struct device *dev = rockchip->dev;
914 struct phy *phy; 301 struct phy *phy;
@@ -948,452 +335,22 @@ static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
948 335
949 return 0; 336 return 0;
950} 337}
338EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
951 339
952static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) 340void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
953{ 341{
954 int irq, err; 342 int i;
955 struct device *dev = rockchip->dev;
956 struct platform_device *pdev = to_platform_device(dev);
957
958 irq = platform_get_irq_byname(pdev, "sys");
959 if (irq < 0) {
960 dev_err(dev, "missing sys IRQ resource\n");
961 return irq;
962 }
963
964 err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
965 IRQF_SHARED, "pcie-sys", rockchip);
966 if (err) {
967 dev_err(dev, "failed to request PCIe subsystem IRQ\n");
968 return err;
969 }
970
971 irq = platform_get_irq_byname(pdev, "legacy");
972 if (irq < 0) {
973 dev_err(dev, "missing legacy IRQ resource\n");
974 return irq;
975 }
976
977 irq_set_chained_handler_and_data(irq,
978 rockchip_pcie_legacy_int_handler,
979 rockchip);
980
981 irq = platform_get_irq_byname(pdev, "client");
982 if (irq < 0) {
983 dev_err(dev, "missing client IRQ resource\n");
984 return irq;
985 }
986
987 err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
988 IRQF_SHARED, "pcie-client", rockchip);
989 if (err) {
990 dev_err(dev, "failed to request PCIe client IRQ\n");
991 return err;
992 }
993
994 return 0;
995}
996
997/**
998 * rockchip_pcie_parse_dt - Parse Device Tree
999 * @rockchip: PCIe port information
1000 *
1001 * Return: '0' on success and error value on failure
1002 */
1003static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
1004{
1005 struct device *dev = rockchip->dev;
1006 struct platform_device *pdev = to_platform_device(dev);
1007 struct device_node *node = dev->of_node;
1008 struct resource *regs;
1009 int err;
1010
1011 regs = platform_get_resource_byname(pdev,
1012 IORESOURCE_MEM,
1013 "axi-base");
1014 rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
1015 if (IS_ERR(rockchip->reg_base))
1016 return PTR_ERR(rockchip->reg_base);
1017
1018 regs = platform_get_resource_byname(pdev,
1019 IORESOURCE_MEM,
1020 "apb-base");
1021 rockchip->apb_base = devm_ioremap_resource(dev, regs);
1022 if (IS_ERR(rockchip->apb_base))
1023 return PTR_ERR(rockchip->apb_base);
1024
1025 err = rockchip_pcie_get_phys(rockchip);
1026 if (err)
1027 return err;
1028
1029 rockchip->lanes = 1;
1030 err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
1031 if (!err && (rockchip->lanes == 0 ||
1032 rockchip->lanes == 3 ||
1033 rockchip->lanes > 4)) {
1034 dev_warn(dev, "invalid num-lanes, default to use one lane\n");
1035 rockchip->lanes = 1;
1036 }
1037
1038 rockchip->link_gen = of_pci_get_max_link_speed(node);
1039 if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
1040 rockchip->link_gen = 2;
1041
1042 rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
1043 if (IS_ERR(rockchip->core_rst)) {
1044 if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
1045 dev_err(dev, "missing core reset property in node\n");
1046 return PTR_ERR(rockchip->core_rst);
1047 }
1048
1049 rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
1050 if (IS_ERR(rockchip->mgmt_rst)) {
1051 if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
1052 dev_err(dev, "missing mgmt reset property in node\n");
1053 return PTR_ERR(rockchip->mgmt_rst);
1054 }
1055
1056 rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
1057 "mgmt-sticky");
1058 if (IS_ERR(rockchip->mgmt_sticky_rst)) {
1059 if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
1060 dev_err(dev, "missing mgmt-sticky reset property in node\n");
1061 return PTR_ERR(rockchip->mgmt_sticky_rst);
1062 }
1063
1064 rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
1065 if (IS_ERR(rockchip->pipe_rst)) {
1066 if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
1067 dev_err(dev, "missing pipe reset property in node\n");
1068 return PTR_ERR(rockchip->pipe_rst);
1069 }
1070
1071 rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
1072 if (IS_ERR(rockchip->pm_rst)) {
1073 if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
1074 dev_err(dev, "missing pm reset property in node\n");
1075 return PTR_ERR(rockchip->pm_rst);
1076 }
1077
1078 rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
1079 if (IS_ERR(rockchip->pclk_rst)) {
1080 if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
1081 dev_err(dev, "missing pclk reset property in node\n");
1082 return PTR_ERR(rockchip->pclk_rst);
1083 }
1084
1085 rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
1086 if (IS_ERR(rockchip->aclk_rst)) {
1087 if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
1088 dev_err(dev, "missing aclk reset property in node\n");
1089 return PTR_ERR(rockchip->aclk_rst);
1090 }
1091
1092 rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
1093 if (IS_ERR(rockchip->ep_gpio)) {
1094 dev_err(dev, "missing ep-gpios property in node\n");
1095 return PTR_ERR(rockchip->ep_gpio);
1096 }
1097
1098 rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
1099 if (IS_ERR(rockchip->aclk_pcie)) {
1100 dev_err(dev, "aclk clock not found\n");
1101 return PTR_ERR(rockchip->aclk_pcie);
1102 }
1103
1104 rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
1105 if (IS_ERR(rockchip->aclk_perf_pcie)) {
1106 dev_err(dev, "aclk_perf clock not found\n");
1107 return PTR_ERR(rockchip->aclk_perf_pcie);
1108 }
1109
1110 rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
1111 if (IS_ERR(rockchip->hclk_pcie)) {
1112 dev_err(dev, "hclk clock not found\n");
1113 return PTR_ERR(rockchip->hclk_pcie);
1114 }
1115
1116 rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
1117 if (IS_ERR(rockchip->clk_pcie_pm)) {
1118 dev_err(dev, "pm clock not found\n");
1119 return PTR_ERR(rockchip->clk_pcie_pm);
1120 }
1121
1122 err = rockchip_pcie_setup_irq(rockchip);
1123 if (err)
1124 return err;
1125
1126 rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
1127 if (IS_ERR(rockchip->vpcie12v)) {
1128 if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
1129 return -EPROBE_DEFER;
1130 dev_info(dev, "no vpcie12v regulator found\n");
1131 }
1132
1133 rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
1134 if (IS_ERR(rockchip->vpcie3v3)) {
1135 if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
1136 return -EPROBE_DEFER;
1137 dev_info(dev, "no vpcie3v3 regulator found\n");
1138 }
1139
1140 rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
1141 if (IS_ERR(rockchip->vpcie1v8)) {
1142 if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
1143 return -EPROBE_DEFER;
1144 dev_info(dev, "no vpcie1v8 regulator found\n");
1145 }
1146
1147 rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
1148 if (IS_ERR(rockchip->vpcie0v9)) {
1149 if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
1150 return -EPROBE_DEFER;
1151 dev_info(dev, "no vpcie0v9 regulator found\n");
1152 }
1153
1154 return 0;
1155}
1156
1157static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
1158{
1159 struct device *dev = rockchip->dev;
1160 int err;
1161
1162 if (!IS_ERR(rockchip->vpcie12v)) {
1163 err = regulator_enable(rockchip->vpcie12v);
1164 if (err) {
1165 dev_err(dev, "fail to enable vpcie12v regulator\n");
1166 goto err_out;
1167 }
1168 }
1169
1170 if (!IS_ERR(rockchip->vpcie3v3)) {
1171 err = regulator_enable(rockchip->vpcie3v3);
1172 if (err) {
1173 dev_err(dev, "fail to enable vpcie3v3 regulator\n");
1174 goto err_disable_12v;
1175 }
1176 }
1177
1178 if (!IS_ERR(rockchip->vpcie1v8)) {
1179 err = regulator_enable(rockchip->vpcie1v8);
1180 if (err) {
1181 dev_err(dev, "fail to enable vpcie1v8 regulator\n");
1182 goto err_disable_3v3;
1183 }
1184 }
1185
1186 if (!IS_ERR(rockchip->vpcie0v9)) {
1187 err = regulator_enable(rockchip->vpcie0v9);
1188 if (err) {
1189 dev_err(dev, "fail to enable vpcie0v9 regulator\n");
1190 goto err_disable_1v8;
1191 }
1192 }
1193
1194 return 0;
1195
1196err_disable_1v8:
1197 if (!IS_ERR(rockchip->vpcie1v8))
1198 regulator_disable(rockchip->vpcie1v8);
1199err_disable_3v3:
1200 if (!IS_ERR(rockchip->vpcie3v3))
1201 regulator_disable(rockchip->vpcie3v3);
1202err_disable_12v:
1203 if (!IS_ERR(rockchip->vpcie12v))
1204 regulator_disable(rockchip->vpcie12v);
1205err_out:
1206 return err;
1207}
1208
1209static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
1210{
1211 rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
1212 (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
1213 rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
1214 PCIE_CORE_INT_MASK);
1215
1216 rockchip_pcie_enable_bw_int(rockchip);
1217}
1218
1219static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
1220 irq_hw_number_t hwirq)
1221{
1222 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
1223 irq_set_chip_data(irq, domain->host_data);
1224
1225 return 0;
1226}
1227
1228static const struct irq_domain_ops intx_domain_ops = {
1229 .map = rockchip_pcie_intx_map,
1230};
1231
1232static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
1233{
1234 struct device *dev = rockchip->dev;
1235 struct device_node *intc = of_get_next_child(dev->of_node, NULL);
1236
1237 if (!intc) {
1238 dev_err(dev, "missing child interrupt-controller node\n");
1239 return -EINVAL;
1240 }
1241
1242 rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
1243 &intx_domain_ops, rockchip);
1244 if (!rockchip->irq_domain) {
1245 dev_err(dev, "failed to get a INTx IRQ domain\n");
1246 return -EINVAL;
1247 }
1248
1249 return 0;
1250}
1251
1252static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
1253 int region_no, int type, u8 num_pass_bits,
1254 u32 lower_addr, u32 upper_addr)
1255{
1256 u32 ob_addr_0;
1257 u32 ob_addr_1;
1258 u32 ob_desc_0;
1259 u32 aw_offset;
1260
1261 if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
1262 return -EINVAL;
1263 if (num_pass_bits + 1 < 8)
1264 return -EINVAL;
1265 if (num_pass_bits > 63)
1266 return -EINVAL;
1267 if (region_no == 0) {
1268 if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
1269 return -EINVAL;
1270 }
1271 if (region_no != 0) {
1272 if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
1273 return -EINVAL;
1274 }
1275
1276 aw_offset = (region_no << OB_REG_SIZE_SHIFT);
1277
1278 ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
1279 ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
1280 ob_addr_1 = upper_addr;
1281 ob_desc_0 = (1 << 23 | type);
1282
1283 rockchip_pcie_write(rockchip, ob_addr_0,
1284 PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
1285 rockchip_pcie_write(rockchip, ob_addr_1,
1286 PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
1287 rockchip_pcie_write(rockchip, ob_desc_0,
1288 PCIE_CORE_OB_REGION_DESC0 + aw_offset);
1289 rockchip_pcie_write(rockchip, 0,
1290 PCIE_CORE_OB_REGION_DESC1 + aw_offset);
1291
1292 return 0;
1293}
1294
1295static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
1296 int region_no, u8 num_pass_bits,
1297 u32 lower_addr, u32 upper_addr)
1298{
1299 u32 ib_addr_0;
1300 u32 ib_addr_1;
1301 u32 aw_offset;
1302
1303 if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
1304 return -EINVAL;
1305 if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
1306 return -EINVAL;
1307 if (num_pass_bits > 63)
1308 return -EINVAL;
1309
1310 aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
1311
1312 ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
1313 ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
1314 ib_addr_1 = upper_addr;
1315
1316 rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
1317 rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
1318
1319 return 0;
1320}
1321
1322static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
1323{
1324 struct device *dev = rockchip->dev;
1325 int offset;
1326 int err;
1327 int reg_no;
1328
1329 rockchip_pcie_cfg_configuration_accesses(rockchip,
1330 AXI_WRAPPER_TYPE0_CFG);
1331
1332 for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
1333 err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
1334 AXI_WRAPPER_MEM_WRITE,
1335 20 - 1,
1336 rockchip->mem_bus_addr +
1337 (reg_no << 20),
1338 0);
1339 if (err) {
1340 dev_err(dev, "program RC mem outbound ATU failed\n");
1341 return err;
1342 }
1343 }
1344
1345 err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
1346 if (err) {
1347 dev_err(dev, "program RC mem inbound ATU failed\n");
1348 return err;
1349 }
1350
1351 offset = rockchip->mem_size >> 20;
1352 for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
1353 err = rockchip_pcie_prog_ob_atu(rockchip,
1354 reg_no + 1 + offset,
1355 AXI_WRAPPER_IO_WRITE,
1356 20 - 1,
1357 rockchip->io_bus_addr +
1358 (reg_no << 20),
1359 0);
1360 if (err) {
1361 dev_err(dev, "program RC io outbound ATU failed\n");
1362 return err;
1363 }
1364 }
1365
1366 /* assign message regions */
1367 rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
1368 AXI_WRAPPER_NOR_MSG,
1369 20 - 1, 0, 0);
1370
1371 rockchip->msg_bus_addr = rockchip->mem_bus_addr +
1372 ((reg_no + offset) << 20);
1373 return err;
1374}
1375
1376static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
1377{
1378 u32 value;
1379 int err;
1380
1381 /* send PME_TURN_OFF message */
1382 writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
1383 343
1384 /* read LTSSM and wait for falling into L2 link state */ 344 for (i = 0; i < MAX_LANE_NUM; i++) {
1385 err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, 345 /* inactive lanes are already powered off */
1386 value, PCIE_LINK_IS_L2(value), 20, 346 if (rockchip->lanes_map & BIT(i))
1387 jiffies_to_usecs(5 * HZ)); 347 phy_power_off(rockchip->phys[i]);
1388 if (err) { 348 phy_exit(rockchip->phys[i]);
1389 dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
1390 return err;
1391 } 349 }
1392
1393 return 0;
1394} 350}
351EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
1395 352
1396static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) 353int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
1397{ 354{
1398 struct device *dev = rockchip->dev; 355 struct device *dev = rockchip->dev;
1399 int err; 356 int err;
@@ -1432,8 +389,9 @@ err_aclk_perf_pcie:
1432 clk_disable_unprepare(rockchip->aclk_pcie); 389 clk_disable_unprepare(rockchip->aclk_pcie);
1433 return err; 390 return err;
1434} 391}
392EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
1435 393
1436static void rockchip_pcie_disable_clocks(void *data) 394void rockchip_pcie_disable_clocks(void *data)
1437{ 395{
1438 struct rockchip_pcie *rockchip = data; 396 struct rockchip_pcie *rockchip = data;
1439 397
@@ -1442,267 +400,25 @@ static void rockchip_pcie_disable_clocks(void *data)
1442 clk_disable_unprepare(rockchip->aclk_perf_pcie); 400 clk_disable_unprepare(rockchip->aclk_perf_pcie);
1443 clk_disable_unprepare(rockchip->aclk_pcie); 401 clk_disable_unprepare(rockchip->aclk_pcie);
1444} 402}
403EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
1445 404
1446static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) 405void rockchip_pcie_cfg_configuration_accesses(
1447{ 406 struct rockchip_pcie *rockchip, u32 type)
1448 struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
1449 int ret;
1450
1451 /* disable core and cli int since we don't need to ack PME_ACK */
1452 rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
1453 PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
1454 rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
1455
1456 ret = rockchip_pcie_wait_l2(rockchip);
1457 if (ret) {
1458 rockchip_pcie_enable_interrupts(rockchip);
1459 return ret;
1460 }
1461
1462 rockchip_pcie_deinit_phys(rockchip);
1463
1464 rockchip_pcie_disable_clocks(rockchip);
1465
1466 if (!IS_ERR(rockchip->vpcie0v9))
1467 regulator_disable(rockchip->vpcie0v9);
1468
1469 return ret;
1470}
1471
1472static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
1473{
1474 struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
1475 int err;
1476
1477 if (!IS_ERR(rockchip->vpcie0v9)) {
1478 err = regulator_enable(rockchip->vpcie0v9);
1479 if (err) {
1480 dev_err(dev, "fail to enable vpcie0v9 regulator\n");
1481 return err;
1482 }
1483 }
1484
1485 err = rockchip_pcie_enable_clocks(rockchip);
1486 if (err)
1487 goto err_disable_0v9;
1488
1489 err = rockchip_pcie_init_port(rockchip);
1490 if (err)
1491 goto err_pcie_resume;
1492
1493 err = rockchip_pcie_cfg_atu(rockchip);
1494 if (err)
1495 goto err_err_deinit_port;
1496
1497 /* Need this to enter L1 again */
1498 rockchip_pcie_update_txcredit_mui(rockchip);
1499 rockchip_pcie_enable_interrupts(rockchip);
1500
1501 return 0;
1502
1503err_err_deinit_port:
1504 rockchip_pcie_deinit_phys(rockchip);
1505err_pcie_resume:
1506 rockchip_pcie_disable_clocks(rockchip);
1507err_disable_0v9:
1508 if (!IS_ERR(rockchip->vpcie0v9))
1509 regulator_disable(rockchip->vpcie0v9);
1510 return err;
1511}
1512
1513static int rockchip_pcie_probe(struct platform_device *pdev)
1514{
1515 struct rockchip_pcie *rockchip;
1516 struct device *dev = &pdev->dev;
1517 struct pci_bus *bus, *child;
1518 struct pci_host_bridge *bridge;
1519 struct resource_entry *win;
1520 resource_size_t io_base;
1521 struct resource *mem;
1522 struct resource *io;
1523 int err;
1524
1525 LIST_HEAD(res);
1526
1527 if (!dev->of_node)
1528 return -ENODEV;
1529
1530 bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
1531 if (!bridge)
1532 return -ENOMEM;
1533
1534 rockchip = pci_host_bridge_priv(bridge);
1535
1536 platform_set_drvdata(pdev, rockchip);
1537 rockchip->dev = dev;
1538
1539 err = rockchip_pcie_parse_dt(rockchip);
1540 if (err)
1541 return err;
1542
1543 err = rockchip_pcie_enable_clocks(rockchip);
1544 if (err)
1545 return err;
1546
1547 err = rockchip_pcie_set_vpcie(rockchip);
1548 if (err) {
1549 dev_err(dev, "failed to set vpcie regulator\n");
1550 goto err_set_vpcie;
1551 }
1552
1553 err = rockchip_pcie_init_port(rockchip);
1554 if (err)
1555 goto err_vpcie;
1556
1557 rockchip_pcie_enable_interrupts(rockchip);
1558
1559 err = rockchip_pcie_init_irq_domain(rockchip);
1560 if (err < 0)
1561 goto err_deinit_port;
1562
1563 err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
1564 &res, &io_base);
1565 if (err)
1566 goto err_remove_irq_domain;
1567
1568 err = devm_request_pci_bus_resources(dev, &res);
1569 if (err)
1570 goto err_free_res;
1571
1572 /* Get the I/O and memory ranges from DT */
1573 resource_list_for_each_entry(win, &res) {
1574 switch (resource_type(win->res)) {
1575 case IORESOURCE_IO:
1576 io = win->res;
1577 io->name = "I/O";
1578 rockchip->io_size = resource_size(io);
1579 rockchip->io_bus_addr = io->start - win->offset;
1580 err = pci_remap_iospace(io, io_base);
1581 if (err) {
1582 dev_warn(dev, "error %d: failed to map resource %pR\n",
1583 err, io);
1584 continue;
1585 }
1586 rockchip->io = io;
1587 break;
1588 case IORESOURCE_MEM:
1589 mem = win->res;
1590 mem->name = "MEM";
1591 rockchip->mem_size = resource_size(mem);
1592 rockchip->mem_bus_addr = mem->start - win->offset;
1593 break;
1594 case IORESOURCE_BUS:
1595 rockchip->root_bus_nr = win->res->start;
1596 break;
1597 default:
1598 continue;
1599 }
1600 }
1601
1602 err = rockchip_pcie_cfg_atu(rockchip);
1603 if (err)
1604 goto err_unmap_iospace;
1605
1606 rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
1607 if (!rockchip->msg_region) {
1608 err = -ENOMEM;
1609 goto err_unmap_iospace;
1610 }
1611
1612 list_splice_init(&res, &bridge->windows);
1613 bridge->dev.parent = dev;
1614 bridge->sysdata = rockchip;
1615 bridge->busnr = 0;
1616 bridge->ops = &rockchip_pcie_ops;
1617 bridge->map_irq = of_irq_parse_and_map_pci;
1618 bridge->swizzle_irq = pci_common_swizzle;
1619
1620 err = pci_scan_root_bus_bridge(bridge);
1621 if (err < 0)
1622 goto err_unmap_iospace;
1623
1624 bus = bridge->bus;
1625
1626 rockchip->root_bus = bus;
1627
1628 pci_bus_size_bridges(bus);
1629 pci_bus_assign_resources(bus);
1630 list_for_each_entry(child, &bus->children, node)
1631 pcie_bus_configure_settings(child);
1632
1633 pci_bus_add_devices(bus);
1634 return 0;
1635
1636err_unmap_iospace:
1637 pci_unmap_iospace(rockchip->io);
1638err_free_res:
1639 pci_free_resource_list(&res);
1640err_remove_irq_domain:
1641 irq_domain_remove(rockchip->irq_domain);
1642err_deinit_port:
1643 rockchip_pcie_deinit_phys(rockchip);
1644err_vpcie:
1645 if (!IS_ERR(rockchip->vpcie12v))
1646 regulator_disable(rockchip->vpcie12v);
1647 if (!IS_ERR(rockchip->vpcie3v3))
1648 regulator_disable(rockchip->vpcie3v3);
1649 if (!IS_ERR(rockchip->vpcie1v8))
1650 regulator_disable(rockchip->vpcie1v8);
1651 if (!IS_ERR(rockchip->vpcie0v9))
1652 regulator_disable(rockchip->vpcie0v9);
1653err_set_vpcie:
1654 rockchip_pcie_disable_clocks(rockchip);
1655 return err;
1656}
1657
1658static int rockchip_pcie_remove(struct platform_device *pdev)
1659{ 407{
1660 struct device *dev = &pdev->dev; 408 u32 ob_desc_0;
1661 struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
1662
1663 pci_stop_root_bus(rockchip->root_bus);
1664 pci_remove_root_bus(rockchip->root_bus);
1665 pci_unmap_iospace(rockchip->io);
1666 irq_domain_remove(rockchip->irq_domain);
1667
1668 rockchip_pcie_deinit_phys(rockchip);
1669
1670 rockchip_pcie_disable_clocks(rockchip);
1671 409
1672 if (!IS_ERR(rockchip->vpcie12v)) 410 /* Configuration Accesses for region 0 */
1673 regulator_disable(rockchip->vpcie12v); 411 rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
1674 if (!IS_ERR(rockchip->vpcie3v3))
1675 regulator_disable(rockchip->vpcie3v3);
1676 if (!IS_ERR(rockchip->vpcie1v8))
1677 regulator_disable(rockchip->vpcie1v8);
1678 if (!IS_ERR(rockchip->vpcie0v9))
1679 regulator_disable(rockchip->vpcie0v9);
1680 412
1681 return 0; 413 rockchip_pcie_write(rockchip,
414 (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
415 PCIE_CORE_OB_REGION_ADDR0);
416 rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
417 PCIE_CORE_OB_REGION_ADDR1);
418 ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
419 ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
420 ob_desc_0 |= (type | (0x1 << 23));
421 rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
422 rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
1682} 423}
1683 424EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
1684static const struct dev_pm_ops rockchip_pcie_pm_ops = {
1685 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
1686 rockchip_pcie_resume_noirq)
1687};
1688
1689static const struct of_device_id rockchip_pcie_of_match[] = {
1690 { .compatible = "rockchip,rk3399-pcie", },
1691 {}
1692};
1693MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
1694
1695static struct platform_driver rockchip_pcie_driver = {
1696 .driver = {
1697 .name = "rockchip-pcie",
1698 .of_match_table = rockchip_pcie_of_match,
1699 .pm = &rockchip_pcie_pm_ops,
1700 },
1701 .probe = rockchip_pcie_probe,
1702 .remove = rockchip_pcie_remove,
1703};
1704module_platform_driver(rockchip_pcie_driver);
1705
1706MODULE_AUTHOR("Rockchip Inc");
1707MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
1708MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h
new file mode 100644
index 000000000000..8e87a059ce73
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.h
@@ -0,0 +1,338 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Rockchip AXI PCIe controller driver
4 *
5 * Copyright (c) 2018 Rockchip, Inc.
6 *
7 * Author: Shawn Lin <shawn.lin@rock-chips.com>
8 *
9 */
10
11#ifndef _PCIE_ROCKCHIP_H
12#define _PCIE_ROCKCHIP_H
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16
17/*
18 * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
19 * bits. This allows atomic updates of the register without locking.
20 */
21#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
22#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
23
24#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
25#define MAX_LANE_NUM 4
26#define MAX_REGION_LIMIT 32
27#define MIN_EP_APERTURE 28
28
29#define PCIE_CLIENT_BASE 0x0
30#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
31#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
32#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
33#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
34#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
35#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
36#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
37#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
38#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
39#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
40#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
41#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
42#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
43#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
44#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
45#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
46#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
47#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
48#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
49#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
50#define PCIE_CLIENT_INTR_SHIFT 5
51#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
52#define PCIE_CLIENT_INT_MSG BIT(14)
53#define PCIE_CLIENT_INT_HOT_RST BIT(13)
54#define PCIE_CLIENT_INT_DPA BIT(12)
55#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
56#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
57#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
58#define PCIE_CLIENT_INT_INTD BIT(8)
59#define PCIE_CLIENT_INT_INTC BIT(7)
60#define PCIE_CLIENT_INT_INTB BIT(6)
61#define PCIE_CLIENT_INT_INTA BIT(5)
62#define PCIE_CLIENT_INT_LOCAL BIT(4)
63#define PCIE_CLIENT_INT_UDMA BIT(3)
64#define PCIE_CLIENT_INT_PHY BIT(2)
65#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
66#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
67
68#define PCIE_CLIENT_INT_LEGACY \
69 (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
70 PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
71
72#define PCIE_CLIENT_INT_CLI \
73 (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
74 PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
75 PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
76 PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
77 PCIE_CLIENT_INT_PHY)
78
79#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
80#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
81#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
82#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
83#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
84#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
85#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
86#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
87#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
88#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
89#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
90#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
91#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
92#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
93 (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
94#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
95#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
96#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
97#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
98#define PCIE_CORE_INT_PRFPE BIT(0)
99#define PCIE_CORE_INT_CRFPE BIT(1)
100#define PCIE_CORE_INT_RRPE BIT(2)
101#define PCIE_CORE_INT_PRFO BIT(3)
102#define PCIE_CORE_INT_CRFO BIT(4)
103#define PCIE_CORE_INT_RT BIT(5)
104#define PCIE_CORE_INT_RTR BIT(6)
105#define PCIE_CORE_INT_PE BIT(7)
106#define PCIE_CORE_INT_MTR BIT(8)
107#define PCIE_CORE_INT_UCR BIT(9)
108#define PCIE_CORE_INT_FCE BIT(10)
109#define PCIE_CORE_INT_CT BIT(11)
110#define PCIE_CORE_INT_UTC BIT(18)
111#define PCIE_CORE_INT_MMVC BIT(19)
112#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
113#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
114#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
115#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
116#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0
117#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1
118#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4
119#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
120#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6
121#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
122
123#define PCIE_CORE_INT \
124 (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
125 PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
126 PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
127 PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
128 PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
129 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
130 PCIE_CORE_INT_MMVC)
131
132#define PCIE_RC_RP_ATS_BASE 0x400000
133#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
134#define PCIE_RC_CONFIG_BASE 0xa00000
135#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
136#define PCIE_RC_CONFIG_SCC_SHIFT 16
137#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
138#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
139#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
140#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
141#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
142#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
143#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
144#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
145#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
146#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
147#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
148#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
149#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
150
151#define PCIE_CORE_AXI_CONF_BASE 0xc00000
152#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
153#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
154#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
155#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
156#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
157#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
158
159#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
160#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
161#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
162#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
163#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
164
165/* Size of one AXI Region (not Region 0) */
166#define AXI_REGION_SIZE BIT(20)
167/* Size of Region 0, equal to sum of sizes of other regions */
168#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
169#define OB_REG_SIZE_SHIFT 5
170#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
171#define AXI_WRAPPER_IO_WRITE 0x6
172#define AXI_WRAPPER_MEM_WRITE 0x2
173#define AXI_WRAPPER_TYPE0_CFG 0xa
174#define AXI_WRAPPER_TYPE1_CFG 0xb
175#define AXI_WRAPPER_NOR_MSG 0xc
176
177#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
178#define MIN_AXI_ADDR_BITS_PASSED 8
179#define PCIE_RC_SEND_PME_OFF 0x11960
180#define ROCKCHIP_VENDOR_ID 0x1d87
181#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
182#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
183#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
184#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
185#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
186 (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
187 PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
188#define PCIE_LINK_IS_L2(x) \
189 (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
190#define PCIE_LINK_UP(x) \
191 (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
192#define PCIE_LINK_IS_GEN2(x) \
193 (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
194
195#define RC_REGION_0_ADDR_TRANS_H 0x00000000
196#define RC_REGION_0_ADDR_TRANS_L 0x00000000
197#define RC_REGION_0_PASS_BITS (25 - 1)
198#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
199#define MAX_AXI_WRAPPER_REGION_NUM 33
200
201#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
202#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
203#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
204#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
205#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
206#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
207#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
208#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
209#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
210#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
211#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
212#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
213#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
214#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
215#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
216#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
217 (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
218#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8)
219#define ROCKCHIP_PCIE_MSG_CODE(code) \
220 (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
221#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16)
222
223#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
224#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
225#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
226#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
227#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
228#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
229#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20)
230#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
231#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
232#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
233#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
234#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
235#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
236 (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
237#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
238 (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
239#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
240 (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
241#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
242#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
243 (((devfn) << 12) & \
244 ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
245#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
246#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
247 (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
248#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
249 (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
250#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
251#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
252#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
253 (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
254#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
255 (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
256#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
257 (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
258#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
259 (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
260#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
261 (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
262
263#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
264 (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
265#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
266 (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
267#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
268 (GENMASK(4, 0) << ((b) * 8))
269#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
270 (((a) << ((b) * 8)) & \
271 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
272#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
273 (GENMASK(7, 5) << ((b) * 8))
274#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
275 (((c) << ((b) * 8 + 5)) & \
276 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
277
278struct rockchip_pcie {
279 void __iomem *reg_base; /* DT axi-base */
280 void __iomem *apb_base; /* DT apb-base */
281 bool legacy_phy;
282 struct phy *phys[MAX_LANE_NUM];
283 struct reset_control *core_rst;
284 struct reset_control *mgmt_rst;
285 struct reset_control *mgmt_sticky_rst;
286 struct reset_control *pipe_rst;
287 struct reset_control *pm_rst;
288 struct reset_control *aclk_rst;
289 struct reset_control *pclk_rst;
290 struct clk *aclk_pcie;
291 struct clk *aclk_perf_pcie;
292 struct clk *hclk_pcie;
293 struct clk *clk_pcie_pm;
294 struct regulator *vpcie12v; /* 12V power supply */
295 struct regulator *vpcie3v3; /* 3.3V power supply */
296 struct regulator *vpcie1v8; /* 1.8V power supply */
297 struct regulator *vpcie0v9; /* 0.9V power supply */
298 struct gpio_desc *ep_gpio;
299 u32 lanes;
300 u8 lanes_map;
301 u8 root_bus_nr;
302 int link_gen;
303 struct device *dev;
304 struct irq_domain *irq_domain;
305 int offset;
306 struct pci_bus *root_bus;
307 struct resource *io;
308 phys_addr_t io_bus_addr;
309 u32 io_size;
310 void __iomem *msg_region;
311 u32 mem_size;
312 phys_addr_t msg_bus_addr;
313 phys_addr_t mem_bus_addr;
314 bool is_rc;
315 struct resource *mem_res;
316};
317
318static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
319{
320 return readl(rockchip->apb_base + reg);
321}
322
323static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
324 u32 reg)
325{
326 writel(val, rockchip->apb_base + reg);
327}
328
329int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
330int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
331int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
332void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
333int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
334void rockchip_pcie_disable_clocks(void *data);
335void rockchip_pcie_cfg_configuration_accesses(
336 struct rockchip_pcie *rockchip, u32 type);
337
338#endif /* _PCIE_ROCKCHIP_H */
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4839ae578711..6a4bbb5b3de0 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -21,6 +21,8 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/irqchip/chained_irq.h> 22#include <linux/irqchip/chained_irq.h>
23 23
24#include "../pci.h"
25
24/* Bridge core config registers */ 26/* Bridge core config registers */
25#define BRCFG_PCIE_RX0 0x00000000 27#define BRCFG_PCIE_RX0 0x00000000
26#define BRCFG_INTERRUPT 0x00000010 28#define BRCFG_INTERRUPT 0x00000010
@@ -825,7 +827,6 @@ static const struct of_device_id nwl_pcie_of_match[] = {
825static int nwl_pcie_probe(struct platform_device *pdev) 827static int nwl_pcie_probe(struct platform_device *pdev)
826{ 828{
827 struct device *dev = &pdev->dev; 829 struct device *dev = &pdev->dev;
828 struct device_node *node = dev->of_node;
829 struct nwl_pcie *pcie; 830 struct nwl_pcie *pcie;
830 struct pci_bus *bus; 831 struct pci_bus *bus;
831 struct pci_bus *child; 832 struct pci_bus *child;
@@ -855,7 +856,8 @@ static int nwl_pcie_probe(struct platform_device *pdev)
855 return err; 856 return err;
856 } 857 }
857 858
858 err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase); 859 err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
860 &iobase);
859 if (err) { 861 if (err) {
860 dev_err(dev, "Getting bridge resources failed\n"); 862 dev_err(dev, "Getting bridge resources failed\n");
861 return err; 863 return err;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 0ad188effc09..b110a3a814e3 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -23,6 +23,8 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25 25
26#include "../pci.h"
27
26/* Register definitions */ 28/* Register definitions */
27#define XILINX_PCIE_REG_BIR 0x00000130 29#define XILINX_PCIE_REG_BIR 0x00000130
28#define XILINX_PCIE_REG_IDR 0x00000138 30#define XILINX_PCIE_REG_IDR 0x00000138
@@ -643,8 +645,8 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
643 return err; 645 return err;
644 } 646 }
645 647
646 err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res, 648 err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
647 &iobase); 649 &iobase);
648 if (err) { 650 if (err) {
649 dev_err(dev, "Getting bridge resources failed\n"); 651 dev_err(dev, "Getting bridge resources failed\n");
650 return err; 652 return err;
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 930a8fa08bd6..942b64fc7f1f 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -24,6 +24,28 @@
24#define VMD_MEMBAR1 2 24#define VMD_MEMBAR1 2
25#define VMD_MEMBAR2 4 25#define VMD_MEMBAR2 4
26 26
27#define PCI_REG_VMCAP 0x40
28#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
29#define PCI_REG_VMCONFIG 0x44
30#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
31#define PCI_REG_VMLOCK 0x70
32#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
33
34enum vmd_features {
35 /*
36 * Device may contain registers which hint the physical location of the
37 * membars, in order to allow proper address translation during
38 * resource assignment to enable guest virtualization
39 */
40 VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
41
42 /*
43 * Device may provide root port configuration information which limits
44 * bus numbering
45 */
46 VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
47};
48
27/* 49/*
28 * Lock for manipulating VMD IRQ lists. 50 * Lock for manipulating VMD IRQ lists.
29 */ 51 */
@@ -546,7 +568,7 @@ static int vmd_find_free_domain(void)
546 return domain + 1; 568 return domain + 1;
547} 569}
548 570
549static int vmd_enable_domain(struct vmd_dev *vmd) 571static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
550{ 572{
551 struct pci_sysdata *sd = &vmd->sysdata; 573 struct pci_sysdata *sd = &vmd->sysdata;
552 struct fwnode_handle *fn; 574 struct fwnode_handle *fn;
@@ -554,12 +576,57 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
554 u32 upper_bits; 576 u32 upper_bits;
555 unsigned long flags; 577 unsigned long flags;
556 LIST_HEAD(resources); 578 LIST_HEAD(resources);
579 resource_size_t offset[2] = {0};
580 resource_size_t membar2_offset = 0x2000, busn_start = 0;
581
582 /*
583 * Shadow registers may exist in certain VMD device ids which allow
584 * guests to correctly assign host physical addresses to the root ports
585 * and child devices. These registers will either return the host value
586 * or 0, depending on an enable bit in the VMD device.
587 */
588 if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
589 u32 vmlock;
590 int ret;
591
592 membar2_offset = 0x2018;
593 ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
594 if (ret || vmlock == ~0)
595 return -ENODEV;
596
597 if (MB2_SHADOW_EN(vmlock)) {
598 void __iomem *membar2;
599
600 membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
601 if (!membar2)
602 return -ENOMEM;
603 offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
604 readq(membar2 + 0x2008);
605 offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
606 readq(membar2 + 0x2010);
607 pci_iounmap(vmd->dev, membar2);
608 }
609 }
610
611 /*
612 * Certain VMD devices may have a root port configuration option which
613 * limits the bus range to between 0-127 or 128-255
614 */
615 if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
616 u32 vmcap, vmconfig;
617
618 pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
619 pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
620 if (BUS_RESTRICT_CAP(vmcap) &&
621 (BUS_RESTRICT_CFG(vmconfig) == 0x1))
622 busn_start = 128;
623 }
557 624
558 res = &vmd->dev->resource[VMD_CFGBAR]; 625 res = &vmd->dev->resource[VMD_CFGBAR];
559 vmd->resources[0] = (struct resource) { 626 vmd->resources[0] = (struct resource) {
560 .name = "VMD CFGBAR", 627 .name = "VMD CFGBAR",
561 .start = 0, 628 .start = busn_start,
562 .end = (resource_size(res) >> 20) - 1, 629 .end = busn_start + (resource_size(res) >> 20) - 1,
563 .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, 630 .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
564 }; 631 };
565 632
@@ -600,7 +667,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
600 flags &= ~IORESOURCE_MEM_64; 667 flags &= ~IORESOURCE_MEM_64;
601 vmd->resources[2] = (struct resource) { 668 vmd->resources[2] = (struct resource) {
602 .name = "VMD MEMBAR2", 669 .name = "VMD MEMBAR2",
603 .start = res->start + 0x2000, 670 .start = res->start + membar2_offset,
604 .end = res->end, 671 .end = res->end,
605 .flags = flags, 672 .flags = flags,
606 .parent = res, 673 .parent = res,
@@ -624,10 +691,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
624 return -ENODEV; 691 return -ENODEV;
625 692
626 pci_add_resource(&resources, &vmd->resources[0]); 693 pci_add_resource(&resources, &vmd->resources[0]);
627 pci_add_resource(&resources, &vmd->resources[1]); 694 pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
628 pci_add_resource(&resources, &vmd->resources[2]); 695 pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
629 vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd, 696
630 &resources); 697 vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
698 sd, &resources);
631 if (!vmd->bus) { 699 if (!vmd->bus) {
632 pci_free_resource_list(&resources); 700 pci_free_resource_list(&resources);
633 irq_domain_remove(vmd->irq_domain); 701 irq_domain_remove(vmd->irq_domain);
@@ -713,7 +781,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
713 781
714 spin_lock_init(&vmd->cfg_lock); 782 spin_lock_init(&vmd->cfg_lock);
715 pci_set_drvdata(dev, vmd); 783 pci_set_drvdata(dev, vmd);
716 err = vmd_enable_domain(vmd); 784 err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
717 if (err) 785 if (err)
718 return err; 786 return err;
719 787
@@ -778,7 +846,10 @@ static int vmd_resume(struct device *dev)
778static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); 846static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
779 847
780static const struct pci_device_id vmd_ids[] = { 848static const struct pci_device_id vmd_ids[] = {
781 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),}, 849 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
850 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
851 .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
852 VMD_FEAT_HAS_BUS_RESTRICTIONS,},
782 {0,} 853 {0,}
783}; 854};
784MODULE_DEVICE_TABLE(pci, vmd_ids); 855MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index a8f21d051e0c..e9f78eb390d2 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -104,14 +104,11 @@ config HOTPLUG_PCI_CPCI_GENERIC
104 When in doubt, say N. 104 When in doubt, say N.
105 105
106config HOTPLUG_PCI_SHPC 106config HOTPLUG_PCI_SHPC
107 tristate "SHPC PCI Hotplug driver" 107 bool "SHPC PCI Hotplug driver"
108 help 108 help
109 Say Y here if you have a motherboard with a SHPC PCI Hotplug 109 Say Y here if you have a motherboard with a SHPC PCI Hotplug
110 controller. 110 controller.
111 111
112 To compile this driver as a module, choose M here: the
113 module will be called shpchp.
114
115 When in doubt, say N. 112 When in doubt, say N.
116 113
117config HOTPLUG_PCI_POWERNV 114config HOTPLUG_PCI_POWERNV
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index c9816166978e..3979f89b250a 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -63,22 +63,17 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
63/** 63/**
64 * acpi_get_hp_hw_control_from_firmware 64 * acpi_get_hp_hw_control_from_firmware
65 * @dev: the pci_dev of the bridge that has a hotplug controller 65 * @dev: the pci_dev of the bridge that has a hotplug controller
66 * @flags: requested control bits for _OSC
67 * 66 *
68 * Attempt to take hotplug control from firmware. 67 * Attempt to take hotplug control from firmware.
69 */ 68 */
70int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) 69int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
71{ 70{
71 const struct pci_host_bridge *host;
72 const struct acpi_pci_root *root;
72 acpi_status status; 73 acpi_status status;
73 acpi_handle chandle, handle; 74 acpi_handle chandle, handle;
74 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 75 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
75 76
76 flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
77 if (!flags) {
78 err("Invalid flags %u specified!\n", flags);
79 return -EINVAL;
80 }
81
82 /* 77 /*
83 * Per PCI firmware specification, we should run the ACPI _OSC 78 * Per PCI firmware specification, we should run the ACPI _OSC
84 * method to get control of hotplug hardware before using it. If 79 * method to get control of hotplug hardware before using it. If
@@ -88,25 +83,20 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
88 * OSHP within the scope of the hotplug controller and its parents, 83 * OSHP within the scope of the hotplug controller and its parents,
89 * up to the host bridge under which this controller exists. 84 * up to the host bridge under which this controller exists.
90 */ 85 */
91 handle = acpi_find_root_bridge_handle(pdev); 86 if (shpchp_is_native(pdev))
92 if (handle) { 87 return 0;
93 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 88
94 dbg("Trying to get hotplug control for %s\n", 89 /* If _OSC exists, we should not evaluate OSHP */
95 (char *)string.pointer); 90 host = pci_find_host_bridge(pdev->bus);
96 status = acpi_pci_osc_control_set(handle, &flags, flags); 91 root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
97 if (ACPI_SUCCESS(status)) 92 if (root->osc_support_set)
98 goto got_one; 93 goto no_control;
99 if (status == AE_SUPPORT)
100 goto no_control;
101 kfree(string.pointer);
102 string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
103 }
104 94
105 handle = ACPI_HANDLE(&pdev->dev); 95 handle = ACPI_HANDLE(&pdev->dev);
106 if (!handle) { 96 if (!handle) {
107 /* 97 /*
108 * This hotplug controller was not listed in the ACPI name 98 * This hotplug controller was not listed in the ACPI name
109 * space at all. Try to get acpi handle of parent pci bus. 99 * space at all. Try to get ACPI handle of parent PCI bus.
110 */ 100 */
111 struct pci_bus *pbus; 101 struct pci_bus *pbus;
112 for (pbus = pdev->bus; pbus; pbus = pbus->parent) { 102 for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
@@ -118,8 +108,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
118 108
119 while (handle) { 109 while (handle) {
120 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 110 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
121 dbg("Trying to get hotplug control for %s\n", 111 pci_info(pdev, "Requesting control of SHPC hotplug via OSHP (%s)\n",
122 (char *)string.pointer); 112 (char *)string.pointer);
123 status = acpi_run_oshp(handle); 113 status = acpi_run_oshp(handle);
124 if (ACPI_SUCCESS(status)) 114 if (ACPI_SUCCESS(status))
125 goto got_one; 115 goto got_one;
@@ -131,13 +121,12 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
131 break; 121 break;
132 } 122 }
133no_control: 123no_control:
134 dbg("Cannot get control of hotplug hardware for pci %s\n", 124 pci_info(pdev, "Cannot get control of SHPC hotplug\n");
135 pci_name(pdev));
136 kfree(string.pointer); 125 kfree(string.pointer);
137 return -ENODEV; 126 return -ENODEV;
138got_one: 127got_one:
139 dbg("Gained control for hotplug HW for pci %s (%s)\n", 128 pci_info(pdev, "Gained control of SHPC hotplug (%s)\n",
140 pci_name(pdev), (char *)string.pointer); 129 (char *)string.pointer);
141 kfree(string.pointer); 130 kfree(string.pointer);
142 return 0; 131 return 0;
143} 132}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b45b375c0e6c..3a17b290df5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -287,11 +287,12 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
287 /* 287 /*
288 * Expose slots to user space for functions that have _EJ0 or _RMV or 288 * Expose slots to user space for functions that have _EJ0 or _RMV or
289 * are located in dock stations. Do not expose them for devices handled 289 * are located in dock stations. Do not expose them for devices handled
290 * by the native PCIe hotplug (PCIeHP), becuase that code is supposed to 290 * by the native PCIe hotplug (PCIeHP) or standard PCI hotplug
291 * expose slots to user space in those cases. 291 * (SHPCHP), because that code is supposed to expose slots to user
292 * space in those cases.
292 */ 293 */
293 if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev)) 294 if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
294 && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) { 295 && !(pdev && hotplug_is_native(pdev))) {
295 unsigned long long sun; 296 unsigned long long sun;
296 int retval; 297 int retval;
297 298
@@ -430,6 +431,29 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
430 return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0)); 431 return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
431} 432}
432 433
434static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
435{
436 struct pci_bus *bus = bridge->subordinate;
437 struct pci_dev *dev;
438 int max;
439
440 if (!bus)
441 return;
442
443 max = bus->busn_res.start;
444 /* Scan already configured non-hotplug bridges */
445 for_each_pci_bridge(dev, bus) {
446 if (!hotplug_is_native(dev))
447 max = pci_scan_bridge(bus, dev, max, 0);
448 }
449
450 /* Scan non-hotplug bridges that need to be reconfigured */
451 for_each_pci_bridge(dev, bus) {
452 if (!hotplug_is_native(dev))
453 max = pci_scan_bridge(bus, dev, max, 1);
454 }
455}
456
433/** 457/**
434 * enable_slot - enable, configure a slot 458 * enable_slot - enable, configure a slot
435 * @slot: slot to be enabled 459 * @slot: slot to be enabled
@@ -442,25 +466,42 @@ static void enable_slot(struct acpiphp_slot *slot)
442 struct pci_dev *dev; 466 struct pci_dev *dev;
443 struct pci_bus *bus = slot->bus; 467 struct pci_bus *bus = slot->bus;
444 struct acpiphp_func *func; 468 struct acpiphp_func *func;
445 int max, pass;
446 LIST_HEAD(add_list);
447 469
448 acpiphp_rescan_slot(slot); 470 if (bus->self && hotplug_is_native(bus->self)) {
449 max = acpiphp_max_busnr(bus); 471 /*
450 for (pass = 0; pass < 2; pass++) { 472 * If native hotplug is used, it will take care of hotplug
473 * slot management and resource allocation for hotplug
474 * bridges. However, ACPI hotplug may still be used for
475 * non-hotplug bridges to bring in additional devices such
476 * as a Thunderbolt host controller.
477 */
451 for_each_pci_bridge(dev, bus) { 478 for_each_pci_bridge(dev, bus) {
452 if (PCI_SLOT(dev->devfn) != slot->device) 479 if (PCI_SLOT(dev->devfn) == slot->device)
453 continue; 480 acpiphp_native_scan_bridge(dev);
454 481 }
455 max = pci_scan_bridge(bus, dev, max, pass); 482 pci_assign_unassigned_bridge_resources(bus->self);
456 if (pass && dev->subordinate) { 483 } else {
457 check_hotplug_bridge(slot, dev); 484 LIST_HEAD(add_list);
458 pcibios_resource_survey_bus(dev->subordinate); 485 int max, pass;
459 __pci_bus_size_bridges(dev->subordinate, &add_list); 486
487 acpiphp_rescan_slot(slot);
488 max = acpiphp_max_busnr(bus);
489 for (pass = 0; pass < 2; pass++) {
490 for_each_pci_bridge(dev, bus) {
491 if (PCI_SLOT(dev->devfn) != slot->device)
492 continue;
493
494 max = pci_scan_bridge(bus, dev, max, pass);
495 if (pass && dev->subordinate) {
496 check_hotplug_bridge(slot, dev);
497 pcibios_resource_survey_bus(dev->subordinate);
498 __pci_bus_size_bridges(dev->subordinate,
499 &add_list);
500 }
460 } 501 }
461 } 502 }
503 __pci_bus_assign_resources(bus, &add_list, NULL);
462 } 504 }
463 __pci_bus_assign_resources(bus, &add_list, NULL);
464 505
465 acpiphp_sanitize_bus(bus); 506 acpiphp_sanitize_bus(bus);
466 pcie_bus_configure_settings(bus); 507 pcie_bus_configure_settings(bus);
@@ -481,7 +522,7 @@ static void enable_slot(struct acpiphp_slot *slot)
481 if (!dev) { 522 if (!dev) {
482 /* Do not set SLOT_ENABLED flag if some funcs 523 /* Do not set SLOT_ENABLED flag if some funcs
483 are not added. */ 524 are not added. */
484 slot->flags &= (~SLOT_ENABLED); 525 slot->flags &= ~SLOT_ENABLED;
485 continue; 526 continue;
486 } 527 }
487 } 528 }
@@ -510,7 +551,7 @@ static void disable_slot(struct acpiphp_slot *slot)
510 list_for_each_entry(func, &slot->funcs, sibling) 551 list_for_each_entry(func, &slot->funcs, sibling)
511 acpi_bus_trim(func_to_acpi_device(func)); 552 acpi_bus_trim(func_to_acpi_device(func));
512 553
513 slot->flags &= (~SLOT_ENABLED); 554 slot->flags &= ~SLOT_ENABLED;
514} 555}
515 556
516static bool slot_no_hotplug(struct acpiphp_slot *slot) 557static bool slot_no_hotplug(struct acpiphp_slot *slot)
@@ -608,6 +649,11 @@ static void trim_stale_devices(struct pci_dev *dev)
608 alive = pci_device_is_present(dev); 649 alive = pci_device_is_present(dev);
609 650
610 if (!alive) { 651 if (!alive) {
652 pci_dev_set_disconnected(dev, NULL);
653 if (pci_has_subordinate(dev))
654 pci_walk_bus(dev->subordinate, pci_dev_set_disconnected,
655 NULL);
656
611 pci_stop_and_remove_bus_device(dev); 657 pci_stop_and_remove_bus_device(dev);
612 if (adev) 658 if (adev)
613 acpi_bus_trim(adev); 659 acpi_bus_trim(adev);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index b81ca3fa0e84..1869b0411ce0 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -379,7 +379,7 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
379 379
380static int get_max_bus_speed(struct slot *slot) 380static int get_max_bus_speed(struct slot *slot)
381{ 381{
382 int rc; 382 int rc = 0;
383 u8 mode = 0; 383 u8 mode = 0;
384 enum pci_bus_speed speed; 384 enum pci_bus_speed speed;
385 struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; 385 struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 88e917c9120f..5f892065585e 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
121int pcie_init_notification(struct controller *ctrl); 121int pcie_init_notification(struct controller *ctrl);
122int pciehp_enable_slot(struct slot *p_slot); 122int pciehp_enable_slot(struct slot *p_slot);
123int pciehp_disable_slot(struct slot *p_slot); 123int pciehp_disable_slot(struct slot *p_slot);
124void pcie_enable_notification(struct controller *ctrl); 124void pcie_reenable_notification(struct controller *ctrl);
125int pciehp_power_on_slot(struct slot *slot); 125int pciehp_power_on_slot(struct slot *slot);
126void pciehp_power_off_slot(struct slot *slot); 126void pciehp_power_off_slot(struct slot *slot);
127void pciehp_get_power_status(struct slot *slot, u8 *status); 127void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 332b723ff9e6..44a6a63802d5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
283 ctrl = get_service_data(dev); 283 ctrl = get_service_data(dev);
284 284
285 /* reinitialize the chipset's event detection logic */ 285 /* reinitialize the chipset's event detection logic */
286 pcie_enable_notification(ctrl); 286 pcie_reenable_notification(ctrl);
287 287
288 slot = ctrl->slot; 288 slot = ctrl->slot;
289 289
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 18a42f8f5dc5..718b6073afad 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -10,7 +10,6 @@
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com> 12 * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
13 *
14 */ 13 */
15 14
16#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -147,25 +146,22 @@ static void pcie_wait_cmd(struct controller *ctrl)
147 else 146 else
148 rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout)); 147 rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
149 148
150 /*
151 * Controllers with errata like Intel CF118 don't generate
152 * completion notifications unless the power/indicator/interlock
153 * control bits are changed. On such controllers, we'll emit this
154 * timeout message when we wait for completion of commands that
155 * don't change those bits, e.g., commands that merely enable
156 * interrupts.
157 */
158 if (!rc) 149 if (!rc)
159 ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n", 150 ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
160 ctrl->slot_ctrl, 151 ctrl->slot_ctrl,
161 jiffies_to_msecs(jiffies - ctrl->cmd_started)); 152 jiffies_to_msecs(jiffies - ctrl->cmd_started));
162} 153}
163 154
155#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC | \
156 PCI_EXP_SLTCTL_PIC | \
157 PCI_EXP_SLTCTL_AIC | \
158 PCI_EXP_SLTCTL_EIC)
159
164static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, 160static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
165 u16 mask, bool wait) 161 u16 mask, bool wait)
166{ 162{
167 struct pci_dev *pdev = ctrl_dev(ctrl); 163 struct pci_dev *pdev = ctrl_dev(ctrl);
168 u16 slot_ctrl; 164 u16 slot_ctrl_orig, slot_ctrl;
169 165
170 mutex_lock(&ctrl->ctrl_lock); 166 mutex_lock(&ctrl->ctrl_lock);
171 167
@@ -180,6 +176,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
180 goto out; 176 goto out;
181 } 177 }
182 178
179 slot_ctrl_orig = slot_ctrl;
183 slot_ctrl &= ~mask; 180 slot_ctrl &= ~mask;
184 slot_ctrl |= (cmd & mask); 181 slot_ctrl |= (cmd & mask);
185 ctrl->cmd_busy = 1; 182 ctrl->cmd_busy = 1;
@@ -189,6 +186,17 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
189 ctrl->slot_ctrl = slot_ctrl; 186 ctrl->slot_ctrl = slot_ctrl;
190 187
191 /* 188 /*
189 * Controllers with the Intel CF118 and similar errata advertise
190 * Command Completed support, but they only set Command Completed
191 * if we change the "Control" bits for power, power indicator,
192 * attention indicator, or interlock. If we only change the
193 * "Enable" bits, they never set the Command Completed bit.
194 */
195 if (pdev->broken_cmd_compl &&
196 (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
197 ctrl->cmd_busy = 0;
198
199 /*
192 * Optionally wait for the hardware to be ready for a new command, 200 * Optionally wait for the hardware to be ready for a new command,
193 * indicating completion of the above issued command. 201 * indicating completion of the above issued command.
194 */ 202 */
@@ -231,25 +239,11 @@ bool pciehp_check_link_active(struct controller *ctrl)
231 return ret; 239 return ret;
232} 240}
233 241
234static void __pcie_wait_link_active(struct controller *ctrl, bool active)
235{
236 int timeout = 1000;
237
238 if (pciehp_check_link_active(ctrl) == active)
239 return;
240 while (timeout > 0) {
241 msleep(10);
242 timeout -= 10;
243 if (pciehp_check_link_active(ctrl) == active)
244 return;
245 }
246 ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
247 active ? "set" : "cleared");
248}
249
250static void pcie_wait_link_active(struct controller *ctrl) 242static void pcie_wait_link_active(struct controller *ctrl)
251{ 243{
252 __pcie_wait_link_active(ctrl, true); 244 struct pci_dev *pdev = ctrl_dev(ctrl);
245
246 pcie_wait_for_link(pdev, true);
253} 247}
254 248
255static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) 249static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
@@ -659,7 +653,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
659 return handled; 653 return handled;
660} 654}
661 655
662void pcie_enable_notification(struct controller *ctrl) 656static void pcie_enable_notification(struct controller *ctrl)
663{ 657{
664 u16 cmd, mask; 658 u16 cmd, mask;
665 659
@@ -697,6 +691,17 @@ void pcie_enable_notification(struct controller *ctrl)
697 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); 691 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
698} 692}
699 693
694void pcie_reenable_notification(struct controller *ctrl)
695{
696 /*
697 * Clear both Presence and Data Link Layer Changed to make sure
698 * those events still fire after we have re-enabled them.
699 */
700 pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
701 PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
702 pcie_enable_notification(ctrl);
703}
704
700static void pcie_disable_notification(struct controller *ctrl) 705static void pcie_disable_notification(struct controller *ctrl)
701{ 706{
702 u16 mask; 707 u16 mask;
@@ -861,7 +866,7 @@ struct controller *pcie_init(struct pcie_device *dev)
861 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC | 866 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
862 PCI_EXP_SLTSTA_DLLSC); 867 PCI_EXP_SLTSTA_DLLSC);
863 868
864 ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n", 869 ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
865 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, 870 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
866 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), 871 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
867 FLAG(slot_cap, PCI_EXP_SLTCAP_PCP), 872 FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -872,7 +877,8 @@ struct controller *pcie_init(struct pcie_device *dev)
872 FLAG(slot_cap, PCI_EXP_SLTCAP_HPS), 877 FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
873 FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), 878 FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
874 FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), 879 FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
875 FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC)); 880 FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
881 pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
876 882
877 if (pcie_init_slot(ctrl)) 883 if (pcie_init_slot(ctrl))
878 goto abort_ctrl; 884 goto abort_ctrl;
@@ -891,3 +897,21 @@ void pciehp_release_ctrl(struct controller *ctrl)
891 pcie_cleanup_slot(ctrl); 897 pcie_cleanup_slot(ctrl);
892 kfree(ctrl); 898 kfree(ctrl);
893} 899}
900
901static void quirk_cmd_compl(struct pci_dev *pdev)
902{
903 u32 slot_cap;
904
905 if (pci_is_pcie(pdev)) {
906 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
907 if (slot_cap & PCI_EXP_SLTCAP_HPC &&
908 !(slot_cap & PCI_EXP_SLTCAP_NCCS))
909 pdev->broken_cmd_compl = 1;
910 }
911}
912DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
913 PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
914DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
915 PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
916DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
917 PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d44100687dfe..6c2e8d7307c6 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -220,12 +220,16 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
220 220
221 for_each_child_of_node(dn, child) { 221 for_each_child_of_node(dn, child) {
222 ret = of_changeset_attach_node(ocs, child); 222 ret = of_changeset_attach_node(ocs, child);
223 if (ret) 223 if (ret) {
224 of_node_put(child);
224 break; 225 break;
226 }
225 227
226 ret = pnv_php_populate_changeset(ocs, child); 228 ret = pnv_php_populate_changeset(ocs, child);
227 if (ret) 229 if (ret) {
230 of_node_put(child);
228 break; 231 break;
232 }
229 } 233 }
230 234
231 return ret; 235 return ret;
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index c55730b61c9a..516e4835019c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -105,7 +105,6 @@ struct controller {
105}; 105};
106 106
107/* Define AMD SHPC ID */ 107/* Define AMD SHPC ID */
108#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
109#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 108#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
110 109
111/* AMD PCI-X bridge registers */ 110/* AMD PCI-X bridge registers */
@@ -173,17 +172,6 @@ static inline const char *slot_name(struct slot *slot)
173 return hotplug_slot_name(slot->hotplug_slot); 172 return hotplug_slot_name(slot->hotplug_slot);
174} 173}
175 174
176#ifdef CONFIG_ACPI
177#include <linux/pci-acpi.h>
178static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
179{
180 u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
181 return acpi_get_hp_hw_control_from_firmware(dev, flags);
182}
183#else
184#define get_hp_hw_control_from_firmware(dev) (0)
185#endif
186
187struct ctrl_reg { 175struct ctrl_reg {
188 volatile u32 base_offset; 176 volatile u32 base_offset;
189 volatile u32 slot_avail1; 177 volatile u32 slot_avail1;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 1f0f96908b5a..e91be287f292 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -270,24 +270,12 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
270 return 0; 270 return 0;
271} 271}
272 272
273static int is_shpc_capable(struct pci_dev *dev)
274{
275 if (dev->vendor == PCI_VENDOR_ID_AMD &&
276 dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
277 return 1;
278 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
279 return 0;
280 if (get_hp_hw_control_from_firmware(dev))
281 return 0;
282 return 1;
283}
284
285static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 273static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
286{ 274{
287 int rc; 275 int rc;
288 struct controller *ctrl; 276 struct controller *ctrl;
289 277
290 if (!is_shpc_capable(pdev)) 278 if (acpi_get_hp_hw_control_from_firmware(pdev))
291 return -ENODEV; 279 return -ENODEV;
292 280
293 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 281 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index bedda5bda910..1047b56e5730 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -585,13 +585,13 @@ static int shpchp_enable_slot (struct slot *p_slot)
585 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); 585 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
586 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 586 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
587 587
588 if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || 588 if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
589 (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)) 589 p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
590 && p_slot->ctrl->num_slots == 1) { 590 && p_slot->ctrl->num_slots == 1) {
591 /* handle amd pogo errata; this must be done before enable */ 591 /* handle AMD POGO errata; this must be done before enable */
592 amd_pogo_errata_save_misc_reg(p_slot); 592 amd_pogo_errata_save_misc_reg(p_slot);
593 retval = board_added(p_slot); 593 retval = board_added(p_slot);
594 /* handle amd pogo errata; this must be done after enable */ 594 /* handle AMD POGO errata; this must be done after enable */
595 amd_pogo_errata_restore_misc_reg(p_slot); 595 amd_pogo_errata_restore_misc_reg(p_slot);
596 } else 596 } else
597 retval = board_added(p_slot); 597 retval = board_added(p_slot);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 8adf4a64f291..d0d73dbbd5ca 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -469,6 +469,7 @@ found:
469 iov->nres = nres; 469 iov->nres = nres;
470 iov->ctrl = ctrl; 470 iov->ctrl = ctrl;
471 iov->total_VFs = total; 471 iov->total_VFs = total;
472 iov->driver_max_VFs = total;
472 pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device); 473 pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
473 iov->pgsz = pgsz; 474 iov->pgsz = pgsz;
474 iov->self = dev; 475 iov->self = dev;
@@ -827,9 +828,42 @@ int pci_sriov_get_totalvfs(struct pci_dev *dev)
827 if (!dev->is_physfn) 828 if (!dev->is_physfn)
828 return 0; 829 return 0;
829 830
830 if (dev->sriov->driver_max_VFs) 831 return dev->sriov->driver_max_VFs;
831 return dev->sriov->driver_max_VFs;
832
833 return dev->sriov->total_VFs;
834} 832}
835EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs); 833EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
834
835/**
836 * pci_sriov_configure_simple - helper to configure SR-IOV
837 * @dev: the PCI device
838 * @nr_virtfn: number of virtual functions to enable, 0 to disable
839 *
840 * Enable or disable SR-IOV for devices that don't require any PF setup
841 * before enabling SR-IOV. Return value is negative on error, or number of
842 * VFs allocated on success.
843 */
844int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
845{
846 int rc;
847
848 might_sleep();
849
850 if (!dev->is_physfn)
851 return -ENODEV;
852
853 if (pci_vfs_assigned(dev)) {
854 pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
855 return -EPERM;
856 }
857
858 if (nr_virtfn == 0) {
859 sriov_disable(dev);
860 return 0;
861 }
862
863 rc = sriov_enable(dev, nr_virtfn);
864 if (rc < 0)
865 return rc;
866
867 return nr_virtfn;
868}
869EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a28355c273ae..d088c9147f10 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -244,8 +244,9 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
244 244
245#if defined(CONFIG_OF_ADDRESS) 245#if defined(CONFIG_OF_ADDRESS)
246/** 246/**
247 * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT 247 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
248 * @dev: device node of the host bridge having the range property 248 * host bridge resources from DT
249 * @dev: host bridge device
249 * @busno: bus number associated with the bridge root bus 250 * @busno: bus number associated with the bridge root bus
250 * @bus_max: maximum number of buses for this bridge 251 * @bus_max: maximum number of buses for this bridge
251 * @resources: list where the range of resources will be added after DT parsing 252 * @resources: list where the range of resources will be added after DT parsing
@@ -253,8 +254,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
253 * address for the start of the I/O range. Can be NULL if the caller doesn't 254 * address for the start of the I/O range. Can be NULL if the caller doesn't
254 * expect I/O ranges to be present in the device tree. 255 * expect I/O ranges to be present in the device tree.
255 * 256 *
256 * It is the caller's job to free the @resources list.
257 *
258 * This function will parse the "ranges" property of a PCI host bridge device 257 * This function will parse the "ranges" property of a PCI host bridge device
259 * node and setup the resource mapping based on its content. It is expected 258 * node and setup the resource mapping based on its content. It is expected
260 * that the property conforms with the Power ePAPR document. 259 * that the property conforms with the Power ePAPR document.
@@ -262,11 +261,11 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
262 * It returns zero if the range parsing has been successful or a standard error 261 * It returns zero if the range parsing has been successful or a standard error
263 * value if it failed. 262 * value if it failed.
264 */ 263 */
265int of_pci_get_host_bridge_resources(struct device_node *dev, 264int devm_of_pci_get_host_bridge_resources(struct device *dev,
266 unsigned char busno, unsigned char bus_max, 265 unsigned char busno, unsigned char bus_max,
267 struct list_head *resources, resource_size_t *io_base) 266 struct list_head *resources, resource_size_t *io_base)
268{ 267{
269 struct resource_entry *window; 268 struct device_node *dev_node = dev->of_node;
270 struct resource *res; 269 struct resource *res;
271 struct resource *bus_range; 270 struct resource *bus_range;
272 struct of_pci_range range; 271 struct of_pci_range range;
@@ -277,19 +276,19 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
277 if (io_base) 276 if (io_base)
278 *io_base = (resource_size_t)OF_BAD_ADDR; 277 *io_base = (resource_size_t)OF_BAD_ADDR;
279 278
280 bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL); 279 bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
281 if (!bus_range) 280 if (!bus_range)
282 return -ENOMEM; 281 return -ENOMEM;
283 282
284 pr_info("host bridge %pOF ranges:\n", dev); 283 dev_info(dev, "host bridge %pOF ranges:\n", dev_node);
285 284
286 err = of_pci_parse_bus_range(dev, bus_range); 285 err = of_pci_parse_bus_range(dev_node, bus_range);
287 if (err) { 286 if (err) {
288 bus_range->start = busno; 287 bus_range->start = busno;
289 bus_range->end = bus_max; 288 bus_range->end = bus_max;
290 bus_range->flags = IORESOURCE_BUS; 289 bus_range->flags = IORESOURCE_BUS;
291 pr_info(" No bus range found for %pOF, using %pR\n", 290 dev_info(dev, " No bus range found for %pOF, using %pR\n",
292 dev, bus_range); 291 dev_node, bus_range);
293 } else { 292 } else {
294 if (bus_range->end > bus_range->start + bus_max) 293 if (bus_range->end > bus_range->start + bus_max)
295 bus_range->end = bus_range->start + bus_max; 294 bus_range->end = bus_range->start + bus_max;
@@ -297,11 +296,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
297 pci_add_resource(resources, bus_range); 296 pci_add_resource(resources, bus_range);
298 297
299 /* Check for ranges property */ 298 /* Check for ranges property */
300 err = of_pci_range_parser_init(&parser, dev); 299 err = of_pci_range_parser_init(&parser, dev_node);
301 if (err) 300 if (err)
302 goto parse_failed; 301 goto failed;
303 302
304 pr_debug("Parsing ranges property...\n"); 303 dev_dbg(dev, "Parsing ranges property...\n");
305 for_each_of_pci_range(&parser, &range) { 304 for_each_of_pci_range(&parser, &range) {
306 /* Read next ranges element */ 305 /* Read next ranges element */
307 if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) 306 if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -310,9 +309,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
310 snprintf(range_type, 4, "MEM"); 309 snprintf(range_type, 4, "MEM");
311 else 310 else
312 snprintf(range_type, 4, "err"); 311 snprintf(range_type, 4, "err");
313 pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type, 312 dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
314 range.cpu_addr, range.cpu_addr + range.size - 1, 313 range_type, range.cpu_addr,
315 range.pci_addr); 314 range.cpu_addr + range.size - 1, range.pci_addr);
316 315
317 /* 316 /*
318 * If we failed translation or got a zero-sized region 317 * If we failed translation or got a zero-sized region
@@ -321,28 +320,28 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
321 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0) 320 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
322 continue; 321 continue;
323 322
324 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 323 res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
325 if (!res) { 324 if (!res) {
326 err = -ENOMEM; 325 err = -ENOMEM;
327 goto parse_failed; 326 goto failed;
328 } 327 }
329 328
330 err = of_pci_range_to_resource(&range, dev, res); 329 err = of_pci_range_to_resource(&range, dev_node, res);
331 if (err) { 330 if (err) {
332 kfree(res); 331 devm_kfree(dev, res);
333 continue; 332 continue;
334 } 333 }
335 334
336 if (resource_type(res) == IORESOURCE_IO) { 335 if (resource_type(res) == IORESOURCE_IO) {
337 if (!io_base) { 336 if (!io_base) {
338 pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n", 337 dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
339 dev); 338 dev_node);
340 err = -EINVAL; 339 err = -EINVAL;
341 goto conversion_failed; 340 goto failed;
342 } 341 }
343 if (*io_base != (resource_size_t)OF_BAD_ADDR) 342 if (*io_base != (resource_size_t)OF_BAD_ADDR)
344 pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n", 343 dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
345 dev); 344 dev_node);
346 *io_base = range.cpu_addr; 345 *io_base = range.cpu_addr;
347 } 346 }
348 347
@@ -351,15 +350,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
351 350
352 return 0; 351 return 0;
353 352
354conversion_failed: 353failed:
355 kfree(res);
356parse_failed:
357 resource_list_for_each_entry(window, resources)
358 kfree(window->res);
359 pci_free_resource_list(resources); 354 pci_free_resource_list(resources);
360 return err; 355 return err;
361} 356}
362EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); 357EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
363#endif /* CONFIG_OF_ADDRESS */ 358#endif /* CONFIG_OF_ADDRESS */
364 359
365/** 360/**
@@ -599,12 +594,12 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
599 struct resource **bus_range) 594 struct resource **bus_range)
600{ 595{
601 int err, res_valid = 0; 596 int err, res_valid = 0;
602 struct device_node *np = dev->of_node;
603 resource_size_t iobase; 597 resource_size_t iobase;
604 struct resource_entry *win, *tmp; 598 struct resource_entry *win, *tmp;
605 599
606 INIT_LIST_HEAD(resources); 600 INIT_LIST_HEAD(resources);
607 err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase); 601 err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
602 &iobase);
608 if (err) 603 if (err)
609 return err; 604 return err;
610 605
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1abdbf267c19..65113b6eed14 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -370,26 +370,57 @@ EXPORT_SYMBOL_GPL(pci_get_hp_params);
370 370
371/** 371/**
372 * pciehp_is_native - Check whether a hotplug port is handled by the OS 372 * pciehp_is_native - Check whether a hotplug port is handled by the OS
373 * @pdev: Hotplug port to check 373 * @bridge: Hotplug port to check
374 * 374 *
375 * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field 375 * Returns true if the given @bridge is handled by the native PCIe hotplug
376 * and return the value of the "PCI Express Native Hot Plug control" bit. 376 * driver.
377 * On failure to obtain the _OSC Control Field return %false.
378 */ 377 */
379bool pciehp_is_native(struct pci_dev *pdev) 378bool pciehp_is_native(struct pci_dev *bridge)
380{ 379{
381 struct acpi_pci_root *root; 380 const struct pci_host_bridge *host;
382 acpi_handle handle; 381 u32 slot_cap;
383 382
384 handle = acpi_find_root_bridge_handle(pdev); 383 if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
385 if (!handle)
386 return false; 384 return false;
387 385
388 root = acpi_pci_find_root(handle); 386 pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
389 if (!root) 387 if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
388 return false;
389
390 if (pcie_ports_native)
391 return true;
392
393 host = pci_find_host_bridge(bridge->bus);
394 return host->native_pcie_hotplug;
395}
396
397/**
398 * shpchp_is_native - Check whether a hotplug port is handled by the OS
399 * @bridge: Hotplug port to check
400 *
401 * Returns true if the given @bridge is handled by the native SHPC hotplug
402 * driver.
403 */
404bool shpchp_is_native(struct pci_dev *bridge)
405{
406 const struct pci_host_bridge *host;
407
408 if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
409 return false;
410
411 /*
412 * It is assumed that AMD GOLAM chips support SHPC but they do not
413 * have SHPC capability.
414 */
415 if (bridge->vendor == PCI_VENDOR_ID_AMD &&
416 bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
417 return true;
418
419 if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
390 return false; 420 return false;
391 421
392 return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; 422 host = pci_find_host_bridge(bridge->bus);
423 return host->native_shpc_hotplug;
393} 424}
394 425
395/** 426/**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 18ba62c76480..c125d53033c6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1539,7 +1539,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
1539 return 0; 1539 return 0;
1540} 1540}
1541 1541
1542#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) 1542#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
1543/** 1543/**
1544 * pci_uevent_ers - emit a uevent during recovery path of PCI device 1544 * pci_uevent_ers - emit a uevent during recovery path of PCI device
1545 * @pdev: PCI device undergoing error recovery 1545 * @pdev: PCI device undergoing error recovery
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
new file mode 100644
index 000000000000..9795649fc6f9
--- /dev/null
+++ b/drivers/pci/pci-pf-stub.c
@@ -0,0 +1,54 @@
1// SPDX-License-Identifier: GPL-2.0
2/* pci-pf-stub - simple stub driver for PCI SR-IOV PF device
3 *
 4 * This driver is meant to act as a "whitelist" for devices that provide
5 * SR-IOV functionality while at the same time not actually needing a
6 * driver of their own.
7 */
8
9#include <linux/module.h>
10#include <linux/pci.h>
11
12/**
13 * pci_pf_stub_whitelist - White list of devices to bind pci-pf-stub onto
14 *
15 * This table provides the list of IDs this driver is supposed to bind
16 * onto. You could think of this as a list of "quirked" devices where we
17 * are adding support for SR-IOV here since there are no other drivers
18 * that they would be running under.
19 */
20static const struct pci_device_id pci_pf_stub_whitelist[] = {
21 { PCI_VDEVICE(AMAZON, 0x0053) },
22 /* required last entry */
23 { 0 }
24};
25MODULE_DEVICE_TABLE(pci, pci_pf_stub_whitelist);
26
27static int pci_pf_stub_probe(struct pci_dev *dev,
28 const struct pci_device_id *id)
29{
30 pci_info(dev, "claimed by pci-pf-stub\n");
31 return 0;
32}
33
34static struct pci_driver pf_stub_driver = {
35 .name = "pci-pf-stub",
36 .id_table = pci_pf_stub_whitelist,
37 .probe = pci_pf_stub_probe,
38 .sriov_configure = pci_sriov_configure_simple,
39};
40
41static int __init pci_pf_stub_init(void)
42{
43 return pci_register_driver(&pf_stub_driver);
44}
45
46static void __exit pci_pf_stub_exit(void)
47{
48 pci_unregister_driver(&pf_stub_driver);
49}
50
51module_init(pci_pf_stub_init);
52module_exit(pci_pf_stub_exit);
53
54MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 366d93af051d..788a200fb2dc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -288,13 +288,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
288 if (!capable(CAP_SYS_ADMIN)) 288 if (!capable(CAP_SYS_ADMIN))
289 return -EPERM; 289 return -EPERM;
290 290
291 if (!val) { 291 device_lock(dev);
292 if (pci_is_enabled(pdev)) 292 if (dev->driver)
293 pci_disable_device(pdev); 293 result = -EBUSY;
294 else 294 else if (val)
295 result = -EIO;
296 } else
297 result = pci_enable_device(pdev); 295 result = pci_enable_device(pdev);
296 else if (pci_is_enabled(pdev))
297 pci_disable_device(pdev);
298 else
299 result = -EIO;
300 device_unlock(dev);
298 301
299 return result < 0 ? result : count; 302 return result < 0 ? result : count;
300} 303}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e90cf5c32e14..97acba712e4e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -112,6 +112,14 @@ unsigned int pcibios_max_latency = 255;
112/* If set, the PCIe ARI capability will not be used. */ 112/* If set, the PCIe ARI capability will not be used. */
113static bool pcie_ari_disabled; 113static bool pcie_ari_disabled;
114 114
115/* If set, the PCIe ATS capability will not be used. */
116static bool pcie_ats_disabled;
117
118bool pci_ats_disabled(void)
119{
120 return pcie_ats_disabled;
121}
122
115/* Disable bridge_d3 for all PCIe ports */ 123/* Disable bridge_d3 for all PCIe ports */
116static bool pci_bridge_d3_disable; 124static bool pci_bridge_d3_disable;
117/* Force bridge_d3 for all PCIe ports */ 125/* Force bridge_d3 for all PCIe ports */
@@ -4153,6 +4161,35 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
4153 4161
4154 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS); 4162 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4155} 4163}
4164/**
4165 * pcie_wait_for_link - Wait until link is active or inactive
4166 * @pdev: Bridge device
4167 * @active: waiting for active or inactive?
4168 *
4169 * Use this to wait till link becomes active or inactive.
4170 */
4171bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4172{
4173 int timeout = 1000;
4174 bool ret;
4175 u16 lnk_status;
4176
4177 for (;;) {
4178 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4179 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4180 if (ret == active)
4181 return true;
4182 if (timeout <= 0)
4183 break;
4184 msleep(10);
4185 timeout -= 10;
4186 }
4187
4188 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4189 active ? "set" : "cleared");
4190
4191 return false;
4192}
4156 4193
4157void pci_reset_secondary_bus(struct pci_dev *dev) 4194void pci_reset_secondary_bus(struct pci_dev *dev)
4158{ 4195{
@@ -5085,49 +5122,6 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
5085EXPORT_SYMBOL(pcie_set_mps); 5122EXPORT_SYMBOL(pcie_set_mps);
5086 5123
5087/** 5124/**
5088 * pcie_get_minimum_link - determine minimum link settings of a PCI device
5089 * @dev: PCI device to query
5090 * @speed: storage for minimum speed
5091 * @width: storage for minimum width
5092 *
5093 * This function will walk up the PCI device chain and determine the minimum
5094 * link width and speed of the device.
5095 */
5096int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
5097 enum pcie_link_width *width)
5098{
5099 int ret;
5100
5101 *speed = PCI_SPEED_UNKNOWN;
5102 *width = PCIE_LNK_WIDTH_UNKNOWN;
5103
5104 while (dev) {
5105 u16 lnksta;
5106 enum pci_bus_speed next_speed;
5107 enum pcie_link_width next_width;
5108
5109 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5110 if (ret)
5111 return ret;
5112
5113 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5114 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5115 PCI_EXP_LNKSTA_NLW_SHIFT;
5116
5117 if (next_speed < *speed)
5118 *speed = next_speed;
5119
5120 if (next_width < *width)
5121 *width = next_width;
5122
5123 dev = dev->bus->self;
5124 }
5125
5126 return 0;
5127}
5128EXPORT_SYMBOL(pcie_get_minimum_link);
5129
5130/**
5131 * pcie_bandwidth_available - determine minimum link settings of a PCIe 5125 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5132 * device and its bandwidth limitation 5126 * device and its bandwidth limitation
5133 * @dev: PCI device to query 5127 * @dev: PCI device to query
@@ -5717,15 +5711,14 @@ static void pci_no_domains(void)
5717#endif 5711#endif
5718} 5712}
5719 5713
5720#ifdef CONFIG_PCI_DOMAINS 5714#ifdef CONFIG_PCI_DOMAINS_GENERIC
5721static atomic_t __domain_nr = ATOMIC_INIT(-1); 5715static atomic_t __domain_nr = ATOMIC_INIT(-1);
5722 5716
5723int pci_get_new_domain_nr(void) 5717static int pci_get_new_domain_nr(void)
5724{ 5718{
5725 return atomic_inc_return(&__domain_nr); 5719 return atomic_inc_return(&__domain_nr);
5726} 5720}
5727 5721
5728#ifdef CONFIG_PCI_DOMAINS_GENERIC
5729static int of_pci_bus_find_domain_nr(struct device *parent) 5722static int of_pci_bus_find_domain_nr(struct device *parent)
5730{ 5723{
5731 static int use_dt_domains = -1; 5724 static int use_dt_domains = -1;
@@ -5780,7 +5773,6 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5780 acpi_pci_bus_find_domain_nr(bus); 5773 acpi_pci_bus_find_domain_nr(bus);
5781} 5774}
5782#endif 5775#endif
5783#endif
5784 5776
5785/** 5777/**
5786 * pci_ext_cfg_avail - can we access extended PCI config space? 5778 * pci_ext_cfg_avail - can we access extended PCI config space?
@@ -5808,6 +5800,9 @@ static int __init pci_setup(char *str)
5808 if (*str && (str = pcibios_setup(str)) && *str) { 5800 if (*str && (str = pcibios_setup(str)) && *str) {
5809 if (!strcmp(str, "nomsi")) { 5801 if (!strcmp(str, "nomsi")) {
5810 pci_no_msi(); 5802 pci_no_msi();
5803 } else if (!strncmp(str, "noats", 5)) {
5804 pr_info("PCIe: ATS is disabled\n");
5805 pcie_ats_disabled = true;
5811 } else if (!strcmp(str, "noaer")) { 5806 } else if (!strcmp(str, "noaer")) {
5812 pci_no_aer(); 5807 pci_no_aer();
5813 } else if (!strncmp(str, "realloc=", 8)) { 5808 } else if (!strncmp(str, "realloc=", 8)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 023f7cf25bff..c358e7a07f3f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -353,6 +353,11 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
353 353
354void pci_enable_acs(struct pci_dev *dev); 354void pci_enable_acs(struct pci_dev *dev);
355 355
356/* PCI error reporting and recovery */
357void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
358void pcie_do_nonfatal_recovery(struct pci_dev *dev);
359
360bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
356#ifdef CONFIG_PCIEASPM 361#ifdef CONFIG_PCIEASPM
357void pcie_aspm_init_link_state(struct pci_dev *pdev); 362void pcie_aspm_init_link_state(struct pci_dev *pdev);
358void pcie_aspm_exit_link_state(struct pci_dev *pdev); 363void pcie_aspm_exit_link_state(struct pci_dev *pdev);
@@ -407,4 +412,44 @@ static inline u64 pci_rebar_size_to_bytes(int size)
407 return 1ULL << (size + 20); 412 return 1ULL << (size + 20);
408} 413}
409 414
415struct device_node;
416
417#ifdef CONFIG_OF
418int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
419int of_get_pci_domain_nr(struct device_node *node);
420int of_pci_get_max_link_speed(struct device_node *node);
421
422#else
423static inline int
424of_pci_parse_bus_range(struct device_node *node, struct resource *res)
425{
426 return -EINVAL;
427}
428
429static inline int
430of_get_pci_domain_nr(struct device_node *node)
431{
432 return -1;
433}
434
435static inline int
436of_pci_get_max_link_speed(struct device_node *node)
437{
438 return -EINVAL;
439}
440#endif /* CONFIG_OF */
441
442#if defined(CONFIG_OF_ADDRESS)
443int devm_of_pci_get_host_bridge_resources(struct device *dev,
444 unsigned char busno, unsigned char bus_max,
445 struct list_head *resources, resource_size_t *io_base);
446#else
447static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
448 unsigned char busno, unsigned char bus_max,
449 struct list_head *resources, resource_size_t *io_base)
450{
451 return -EINVAL;
452}
453#endif
454
410#endif /* DRIVERS_PCI_H */ 455#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 800e1d404a45..03f4e0b3a140 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,7 +2,7 @@
2# 2#
3# Makefile for PCI Express features and port driver 3# Makefile for PCI Express features and port driver
4 4
5pcieportdrv-y := portdrv_core.o portdrv_pci.o 5pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
6 6
7obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o 7obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
8 8
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 779b3879b1b5..9735c19bf39c 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -94,7 +94,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
94 */ 94 */
95static void aer_enable_rootport(struct aer_rpc *rpc) 95static void aer_enable_rootport(struct aer_rpc *rpc)
96{ 96{
97 struct pci_dev *pdev = rpc->rpd->port; 97 struct pci_dev *pdev = rpc->rpd;
98 int aer_pos; 98 int aer_pos;
99 u16 reg16; 99 u16 reg16;
100 u32 reg32; 100 u32 reg32;
@@ -136,7 +136,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
136 */ 136 */
137static void aer_disable_rootport(struct aer_rpc *rpc) 137static void aer_disable_rootport(struct aer_rpc *rpc)
138{ 138{
139 struct pci_dev *pdev = rpc->rpd->port; 139 struct pci_dev *pdev = rpc->rpd;
140 u32 reg32; 140 u32 reg32;
141 int pos; 141 int pos;
142 142
@@ -232,7 +232,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
232 /* Initialize Root lock access, e_lock, to Root Error Status Reg */ 232 /* Initialize Root lock access, e_lock, to Root Error Status Reg */
233 spin_lock_init(&rpc->e_lock); 233 spin_lock_init(&rpc->e_lock);
234 234
235 rpc->rpd = dev; 235 rpc->rpd = dev->port;
236 INIT_WORK(&rpc->dpc_handler, aer_isr); 236 INIT_WORK(&rpc->dpc_handler, aer_isr);
237 mutex_init(&rpc->rpc_mutex); 237 mutex_init(&rpc->rpc_mutex);
238 238
@@ -353,10 +353,7 @@ static void aer_error_resume(struct pci_dev *dev)
353 pos = dev->aer_cap; 353 pos = dev->aer_cap;
354 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 354 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
355 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 355 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
356 if (dev->error_state == pci_channel_io_normal) 356 status &= ~mask; /* Clear corresponding nonfatal bits */
357 status &= ~mask; /* Clear corresponding nonfatal bits */
358 else
359 status &= mask; /* Clear corresponding fatal bits */
360 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); 357 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
361} 358}
362 359
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 08b4584f62fe..6e0ad9a68fd9 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -58,7 +58,7 @@ struct aer_err_source {
58}; 58};
59 59
60struct aer_rpc { 60struct aer_rpc {
61 struct pcie_device *rpd; /* Root Port device */ 61 struct pci_dev *rpd; /* Root Port device */
62 struct work_struct dpc_handler; 62 struct work_struct dpc_handler;
63 struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX]; 63 struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
64 struct aer_err_info e_info; 64 struct aer_err_info e_info;
@@ -76,36 +76,6 @@ struct aer_rpc {
76 */ 76 */
77}; 77};
78 78
79struct aer_broadcast_data {
80 enum pci_channel_state state;
81 enum pci_ers_result result;
82};
83
84static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
85 enum pci_ers_result new)
86{
87 if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
88 return PCI_ERS_RESULT_NO_AER_DRIVER;
89
90 if (new == PCI_ERS_RESULT_NONE)
91 return orig;
92
93 switch (orig) {
94 case PCI_ERS_RESULT_CAN_RECOVER:
95 case PCI_ERS_RESULT_RECOVERED:
96 orig = new;
97 break;
98 case PCI_ERS_RESULT_DISCONNECT:
99 if (new == PCI_ERS_RESULT_NEED_RESET)
100 orig = PCI_ERS_RESULT_NEED_RESET;
101 break;
102 default:
103 break;
104 }
105
106 return orig;
107}
108
109extern struct bus_type pcie_port_bus_type; 79extern struct bus_type pcie_port_bus_type;
110void aer_isr(struct work_struct *work); 80void aer_isr(struct work_struct *work);
111void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 81void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 0ea5acc40323..42d4f3f32282 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -20,6 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/kfifo.h> 21#include <linux/kfifo.h>
22#include "aerdrv.h" 22#include "aerdrv.h"
23#include "../../pci.h"
23 24
24#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ 25#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
25 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) 26 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -227,329 +228,14 @@ static bool find_source_device(struct pci_dev *parent,
227 return true; 228 return true;
228} 229}
229 230
230static int report_error_detected(struct pci_dev *dev, void *data)
231{
232 pci_ers_result_t vote;
233 const struct pci_error_handlers *err_handler;
234 struct aer_broadcast_data *result_data;
235 result_data = (struct aer_broadcast_data *) data;
236
237 device_lock(&dev->dev);
238 dev->error_state = result_data->state;
239
240 if (!dev->driver ||
241 !dev->driver->err_handler ||
242 !dev->driver->err_handler->error_detected) {
243 if (result_data->state == pci_channel_io_frozen &&
244 dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
245 /*
246 * In case of fatal recovery, if one of down-
247 * stream device has no driver. We might be
248 * unable to recover because a later insmod
249 * of a driver for this device is unaware of
250 * its hw state.
251 */
252 pci_printk(KERN_DEBUG, dev, "device has %s\n",
253 dev->driver ?
254 "no AER-aware driver" : "no driver");
255 }
256
257 /*
258 * If there's any device in the subtree that does not
259 * have an error_detected callback, returning
260 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
261 * the subsequent mmio_enabled/slot_reset/resume
262 * callbacks of "any" device in the subtree. All the
263 * devices in the subtree are left in the error state
264 * without recovery.
265 */
266
267 if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
268 vote = PCI_ERS_RESULT_NO_AER_DRIVER;
269 else
270 vote = PCI_ERS_RESULT_NONE;
271 } else {
272 err_handler = dev->driver->err_handler;
273 vote = err_handler->error_detected(dev, result_data->state);
274 pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
275 }
276
277 result_data->result = merge_result(result_data->result, vote);
278 device_unlock(&dev->dev);
279 return 0;
280}
281
282static int report_mmio_enabled(struct pci_dev *dev, void *data)
283{
284 pci_ers_result_t vote;
285 const struct pci_error_handlers *err_handler;
286 struct aer_broadcast_data *result_data;
287 result_data = (struct aer_broadcast_data *) data;
288
289 device_lock(&dev->dev);
290 if (!dev->driver ||
291 !dev->driver->err_handler ||
292 !dev->driver->err_handler->mmio_enabled)
293 goto out;
294
295 err_handler = dev->driver->err_handler;
296 vote = err_handler->mmio_enabled(dev);
297 result_data->result = merge_result(result_data->result, vote);
298out:
299 device_unlock(&dev->dev);
300 return 0;
301}
302
303static int report_slot_reset(struct pci_dev *dev, void *data)
304{
305 pci_ers_result_t vote;
306 const struct pci_error_handlers *err_handler;
307 struct aer_broadcast_data *result_data;
308 result_data = (struct aer_broadcast_data *) data;
309
310 device_lock(&dev->dev);
311 if (!dev->driver ||
312 !dev->driver->err_handler ||
313 !dev->driver->err_handler->slot_reset)
314 goto out;
315
316 err_handler = dev->driver->err_handler;
317 vote = err_handler->slot_reset(dev);
318 result_data->result = merge_result(result_data->result, vote);
319out:
320 device_unlock(&dev->dev);
321 return 0;
322}
323
324static int report_resume(struct pci_dev *dev, void *data)
325{
326 const struct pci_error_handlers *err_handler;
327
328 device_lock(&dev->dev);
329 dev->error_state = pci_channel_io_normal;
330
331 if (!dev->driver ||
332 !dev->driver->err_handler ||
333 !dev->driver->err_handler->resume)
334 goto out;
335
336 err_handler = dev->driver->err_handler;
337 err_handler->resume(dev);
338 pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
339out:
340 device_unlock(&dev->dev);
341 return 0;
342}
343
344/**
345 * broadcast_error_message - handle message broadcast to downstream drivers
346 * @dev: pointer to from where in a hierarchy message is broadcasted down
347 * @state: error state
348 * @error_mesg: message to print
349 * @cb: callback to be broadcasted
350 *
351 * Invoked during error recovery process. Once being invoked, the content
352 * of error severity will be broadcasted to all downstream drivers in a
353 * hierarchy in question.
354 */
355static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
356 enum pci_channel_state state,
357 char *error_mesg,
358 int (*cb)(struct pci_dev *, void *))
359{
360 struct aer_broadcast_data result_data;
361
362 pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
363 result_data.state = state;
364 if (cb == report_error_detected)
365 result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
366 else
367 result_data.result = PCI_ERS_RESULT_RECOVERED;
368
369 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
370 /*
371 * If the error is reported by a bridge, we think this error
372 * is related to the downstream link of the bridge, so we
373 * do error recovery on all subordinates of the bridge instead
374 * of the bridge and clear the error status of the bridge.
375 */
376 if (cb == report_error_detected)
377 dev->error_state = state;
378 pci_walk_bus(dev->subordinate, cb, &result_data);
379 if (cb == report_resume) {
380 pci_cleanup_aer_uncorrect_error_status(dev);
381 dev->error_state = pci_channel_io_normal;
382 }
383 } else {
384 /*
385 * If the error is reported by an end point, we think this
386 * error is related to the upstream link of the end point.
387 */
388 if (state == pci_channel_io_normal)
389 /*
390 * the error is non fatal so the bus is ok, just invoke
391 * the callback for the function that logged the error.
392 */
393 cb(dev, &result_data);
394 else
395 pci_walk_bus(dev->bus, cb, &result_data);
396 }
397
398 return result_data.result;
399}
400
401/**
402 * default_reset_link - default reset function
403 * @dev: pointer to pci_dev data structure
404 *
405 * Invoked when performing link reset on a Downstream Port or a
406 * Root Port with no aer driver.
407 */
408static pci_ers_result_t default_reset_link(struct pci_dev *dev)
409{
410 pci_reset_bridge_secondary_bus(dev);
411 pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
412 return PCI_ERS_RESULT_RECOVERED;
413}
414
415static int find_aer_service_iter(struct device *device, void *data)
416{
417 struct pcie_port_service_driver *service_driver, **drv;
418
419 drv = (struct pcie_port_service_driver **) data;
420
421 if (device->bus == &pcie_port_bus_type && device->driver) {
422 service_driver = to_service_driver(device->driver);
423 if (service_driver->service == PCIE_PORT_SERVICE_AER) {
424 *drv = service_driver;
425 return 1;
426 }
427 }
428
429 return 0;
430}
431
432static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
433{
434 struct pcie_port_service_driver *drv = NULL;
435
436 device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
437
438 return drv;
439}
440
441static pci_ers_result_t reset_link(struct pci_dev *dev)
442{
443 struct pci_dev *udev;
444 pci_ers_result_t status;
445 struct pcie_port_service_driver *driver;
446
447 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
448 /* Reset this port for all subordinates */
449 udev = dev;
450 } else {
451 /* Reset the upstream component (likely downstream port) */
452 udev = dev->bus->self;
453 }
454
455 /* Use the aer driver of the component firstly */
456 driver = find_aer_service(udev);
457
458 if (driver && driver->reset_link) {
459 status = driver->reset_link(udev);
460 } else if (udev->has_secondary_link) {
461 status = default_reset_link(udev);
462 } else {
463 pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
464 pci_name(udev));
465 return PCI_ERS_RESULT_DISCONNECT;
466 }
467
468 if (status != PCI_ERS_RESULT_RECOVERED) {
469 pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
470 pci_name(udev));
471 return PCI_ERS_RESULT_DISCONNECT;
472 }
473
474 return status;
475}
476
477/**
478 * do_recovery - handle nonfatal/fatal error recovery process
479 * @dev: pointer to a pci_dev data structure of agent detecting an error
480 * @severity: error severity type
481 *
482 * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
483 * error detected message to all downstream drivers within a hierarchy in
484 * question and return the returned code.
485 */
486static void do_recovery(struct pci_dev *dev, int severity)
487{
488 pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
489 enum pci_channel_state state;
490
491 if (severity == AER_FATAL)
492 state = pci_channel_io_frozen;
493 else
494 state = pci_channel_io_normal;
495
496 status = broadcast_error_message(dev,
497 state,
498 "error_detected",
499 report_error_detected);
500
501 if (severity == AER_FATAL) {
502 result = reset_link(dev);
503 if (result != PCI_ERS_RESULT_RECOVERED)
504 goto failed;
505 }
506
507 if (status == PCI_ERS_RESULT_CAN_RECOVER)
508 status = broadcast_error_message(dev,
509 state,
510 "mmio_enabled",
511 report_mmio_enabled);
512
513 if (status == PCI_ERS_RESULT_NEED_RESET) {
514 /*
515 * TODO: Should call platform-specific
516 * functions to reset slot before calling
517 * drivers' slot_reset callbacks?
518 */
519 status = broadcast_error_message(dev,
520 state,
521 "slot_reset",
522 report_slot_reset);
523 }
524
525 if (status != PCI_ERS_RESULT_RECOVERED)
526 goto failed;
527
528 broadcast_error_message(dev,
529 state,
530 "resume",
531 report_resume);
532
533 pci_info(dev, "AER: Device recovery successful\n");
534 return;
535
536failed:
537 pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
538 /* TODO: Should kernel panic here? */
539 pci_info(dev, "AER: Device recovery failed\n");
540}
541
542/** 231/**
543 * handle_error_source - handle logging error into an event log 232 * handle_error_source - handle logging error into an event log
544 * @aerdev: pointer to pcie_device data structure of the root port
545 * @dev: pointer to pci_dev data structure of error source device 233 * @dev: pointer to pci_dev data structure of error source device
546 * @info: comprehensive error information 234 * @info: comprehensive error information
547 * 235 *
548 * Invoked when an error being detected by Root Port. 236 * Invoked when an error being detected by Root Port.
549 */ 237 */
550static void handle_error_source(struct pcie_device *aerdev, 238static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
551 struct pci_dev *dev,
552 struct aer_err_info *info)
553{ 239{
554 int pos; 240 int pos;
555 241
@@ -562,12 +248,13 @@ static void handle_error_source(struct pcie_device *aerdev,
562 if (pos) 248 if (pos)
563 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, 249 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
564 info->status); 250 info->status);
565 } else 251 } else if (info->severity == AER_NONFATAL)
566 do_recovery(dev, info->severity); 252 pcie_do_nonfatal_recovery(dev);
253 else if (info->severity == AER_FATAL)
254 pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
567} 255}
568 256
569#ifdef CONFIG_ACPI_APEI_PCIEAER 257#ifdef CONFIG_ACPI_APEI_PCIEAER
570static void aer_recover_work_func(struct work_struct *work);
571 258
572#define AER_RECOVER_RING_ORDER 4 259#define AER_RECOVER_RING_ORDER 4
573#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER) 260#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
@@ -582,6 +269,30 @@ struct aer_recover_entry {
582 269
583static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry, 270static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
584 AER_RECOVER_RING_SIZE); 271 AER_RECOVER_RING_SIZE);
272
273static void aer_recover_work_func(struct work_struct *work)
274{
275 struct aer_recover_entry entry;
276 struct pci_dev *pdev;
277
278 while (kfifo_get(&aer_recover_ring, &entry)) {
279 pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
280 entry.devfn);
281 if (!pdev) {
282 pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
283 entry.domain, entry.bus,
284 PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
285 continue;
286 }
287 cper_print_aer(pdev, entry.severity, entry.regs);
288 if (entry.severity == AER_NONFATAL)
289 pcie_do_nonfatal_recovery(pdev);
290 else if (entry.severity == AER_FATAL)
291 pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
292 pci_dev_put(pdev);
293 }
294}
295
585/* 296/*
586 * Mutual exclusion for writers of aer_recover_ring, reader side don't 297 * Mutual exclusion for writers of aer_recover_ring, reader side don't
587 * need lock, because there is only one reader and lock is not needed 298 * need lock, because there is only one reader and lock is not needed
@@ -611,27 +322,6 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
611 spin_unlock_irqrestore(&aer_recover_ring_lock, flags); 322 spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
612} 323}
613EXPORT_SYMBOL_GPL(aer_recover_queue); 324EXPORT_SYMBOL_GPL(aer_recover_queue);
614
615static void aer_recover_work_func(struct work_struct *work)
616{
617 struct aer_recover_entry entry;
618 struct pci_dev *pdev;
619
620 while (kfifo_get(&aer_recover_ring, &entry)) {
621 pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
622 entry.devfn);
623 if (!pdev) {
624 pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
625 entry.domain, entry.bus,
626 PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
627 continue;
628 }
629 cper_print_aer(pdev, entry.severity, entry.regs);
630 if (entry.severity != AER_CORRECTABLE)
631 do_recovery(pdev, entry.severity);
632 pci_dev_put(pdev);
633 }
634}
635#endif 325#endif
636 326
637/** 327/**
@@ -695,8 +385,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
695 return 1; 385 return 1;
696} 386}
697 387
698static inline void aer_process_err_devices(struct pcie_device *p_device, 388static inline void aer_process_err_devices(struct aer_err_info *e_info)
699 struct aer_err_info *e_info)
700{ 389{
701 int i; 390 int i;
702 391
@@ -707,19 +396,19 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
707 } 396 }
708 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { 397 for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
709 if (get_device_error_info(e_info->dev[i], e_info)) 398 if (get_device_error_info(e_info->dev[i], e_info))
710 handle_error_source(p_device, e_info->dev[i], e_info); 399 handle_error_source(e_info->dev[i], e_info);
711 } 400 }
712} 401}
713 402
714/** 403/**
715 * aer_isr_one_error - consume an error detected by root port 404 * aer_isr_one_error - consume an error detected by root port
716 * @p_device: pointer to error root port service device 405 * @rpc: pointer to the root port which holds an error
717 * @e_src: pointer to an error source 406 * @e_src: pointer to an error source
718 */ 407 */
719static void aer_isr_one_error(struct pcie_device *p_device, 408static void aer_isr_one_error(struct aer_rpc *rpc,
720 struct aer_err_source *e_src) 409 struct aer_err_source *e_src)
721{ 410{
722 struct aer_rpc *rpc = get_service_data(p_device); 411 struct pci_dev *pdev = rpc->rpd;
723 struct aer_err_info *e_info = &rpc->e_info; 412 struct aer_err_info *e_info = &rpc->e_info;
724 413
725 /* 414 /*
@@ -734,11 +423,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
734 e_info->multi_error_valid = 1; 423 e_info->multi_error_valid = 1;
735 else 424 else
736 e_info->multi_error_valid = 0; 425 e_info->multi_error_valid = 0;
426 aer_print_port_info(pdev, e_info);
737 427
738 aer_print_port_info(p_device->port, e_info); 428 if (find_source_device(pdev, e_info))
739 429 aer_process_err_devices(e_info);
740 if (find_source_device(p_device->port, e_info))
741 aer_process_err_devices(p_device, e_info);
742 } 430 }
743 431
744 if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { 432 if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
@@ -754,10 +442,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
754 else 442 else
755 e_info->multi_error_valid = 0; 443 e_info->multi_error_valid = 0;
756 444
757 aer_print_port_info(p_device->port, e_info); 445 aer_print_port_info(pdev, e_info);
758 446
759 if (find_source_device(p_device->port, e_info)) 447 if (find_source_device(pdev, e_info))
760 aer_process_err_devices(p_device, e_info); 448 aer_process_err_devices(e_info);
761 } 449 }
762} 450}
763 451
@@ -799,11 +487,10 @@ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
799void aer_isr(struct work_struct *work) 487void aer_isr(struct work_struct *work)
800{ 488{
801 struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); 489 struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
802 struct pcie_device *p_device = rpc->rpd;
803 struct aer_err_source uninitialized_var(e_src); 490 struct aer_err_source uninitialized_var(e_src);
804 491
805 mutex_lock(&rpc->rpc_mutex); 492 mutex_lock(&rpc->rpc_mutex);
806 while (get_e_source(rpc, &e_src)) 493 while (get_e_source(rpc, &e_src))
807 aer_isr_one_error(p_device, &e_src); 494 aer_isr_one_error(rpc, &e_src);
808 mutex_unlock(&rpc->rpc_mutex); 495 mutex_unlock(&rpc->rpc_mutex);
809} 496}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index cfc89dd57831..4985bdf64c2e 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -163,17 +163,17 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
163 int id = ((dev->bus->number << 8) | dev->devfn); 163 int id = ((dev->bus->number << 8) | dev->devfn);
164 164
165 if (!info->status) { 165 if (!info->status) {
166 pci_err(dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n", 166 pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
167 aer_error_severity_string[info->severity], id); 167 aer_error_severity_string[info->severity]);
168 goto out; 168 goto out;
169 } 169 }
170 170
171 layer = AER_GET_LAYER_ERROR(info->severity, info->status); 171 layer = AER_GET_LAYER_ERROR(info->severity, info->status);
172 agent = AER_GET_AGENT(info->severity, info->status); 172 agent = AER_GET_AGENT(info->severity, info->status);
173 173
174 pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", 174 pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
175 aer_error_severity_string[info->severity], 175 aer_error_severity_string[info->severity],
176 aer_error_layer[layer], id, aer_agent_string[agent]); 176 aer_error_layer[layer], aer_agent_string[agent]);
177 177
178 pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n", 178 pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
179 dev->vendor, dev->device, 179 dev->vendor, dev->device,
@@ -186,17 +186,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
186 186
187out: 187out:
188 if (info->id && info->error_dev_num > 1 && info->id == id) 188 if (info->id && info->error_dev_num > 1 && info->id == id)
189 pci_err(dev, " Error of this Agent(%04x) is reported first\n", id); 189 pci_err(dev, " Error of this Agent is reported first\n");
190 190
191 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), 191 trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
192 info->severity); 192 info->severity, info->tlp_header_valid, &info->tlp);
193} 193}
194 194
195void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) 195void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
196{ 196{
197 pci_info(dev, "AER: %s%s error received: id=%04x\n", 197 u8 bus = info->id >> 8;
198 u8 devfn = info->id & 0xff;
199
200 pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
198 info->multi_error_valid ? "Multiple " : "", 201 info->multi_error_valid ? "Multiple " : "",
199 aer_error_severity_string[info->severity], info->id); 202 aer_error_severity_string[info->severity],
203 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
200} 204}
201 205
202#ifdef CONFIG_ACPI_APEI_PCIEAER 206#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -216,28 +220,30 @@ EXPORT_SYMBOL_GPL(cper_severity_to_aer);
216void cper_print_aer(struct pci_dev *dev, int aer_severity, 220void cper_print_aer(struct pci_dev *dev, int aer_severity,
217 struct aer_capability_regs *aer) 221 struct aer_capability_regs *aer)
218{ 222{
219 int layer, agent, status_strs_size, tlp_header_valid = 0; 223 int layer, agent, tlp_header_valid = 0;
220 u32 status, mask; 224 u32 status, mask;
221 const char **status_strs; 225 struct aer_err_info info;
222 226
223 if (aer_severity == AER_CORRECTABLE) { 227 if (aer_severity == AER_CORRECTABLE) {
224 status = aer->cor_status; 228 status = aer->cor_status;
225 mask = aer->cor_mask; 229 mask = aer->cor_mask;
226 status_strs = aer_correctable_error_string;
227 status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
228 } else { 230 } else {
229 status = aer->uncor_status; 231 status = aer->uncor_status;
230 mask = aer->uncor_mask; 232 mask = aer->uncor_mask;
231 status_strs = aer_uncorrectable_error_string;
232 status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
233 tlp_header_valid = status & AER_LOG_TLP_MASKS; 233 tlp_header_valid = status & AER_LOG_TLP_MASKS;
234 } 234 }
235 235
236 layer = AER_GET_LAYER_ERROR(aer_severity, status); 236 layer = AER_GET_LAYER_ERROR(aer_severity, status);
237 agent = AER_GET_AGENT(aer_severity, status); 237 agent = AER_GET_AGENT(aer_severity, status);
238 238
239 memset(&info, 0, sizeof(info));
240 info.severity = aer_severity;
241 info.status = status;
242 info.mask = mask;
243 info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
244
239 pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); 245 pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
240 cper_print_bits("", status, status_strs, status_strs_size); 246 __aer_print_error(dev, &info);
241 pci_err(dev, "aer_layer=%s, aer_agent=%s\n", 247 pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
242 aer_error_layer[layer], aer_agent_string[agent]); 248 aer_error_layer[layer], aer_agent_string[agent]);
243 249
@@ -249,6 +255,6 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
249 __print_tlp_header(dev, &aer->header_log); 255 __print_tlp_header(dev, &aer->header_log);
250 256
251 trace_aer_event(dev_name(&dev->dev), (status & ~mask), 257 trace_aer_event(dev_name(&dev->dev), (status & ~mask),
252 aer_severity); 258 aer_severity, tlp_header_valid, &aer->header_log);
253} 259}
254#endif 260#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f76eb7704f64..c687c817b47d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -400,6 +400,15 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
400 info->l1ss_cap = 0; 400 info->l1ss_cap = 0;
401 return; 401 return;
402 } 402 }
403
404 /*
405 * If we don't have LTR for the entire path from the Root Complex
406 * to this device, we can't use ASPM L1.2 because it relies on the
407 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
408 */
409 if (!pdev->ltr_path)
410 info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
411
403 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1, 412 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
404 &info->l1ss_ctl1); 413 &info->l1ss_ctl1);
405 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2, 414 pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 8c57d607e603..d6436681c535 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -68,44 +68,35 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
68 68
69static void dpc_wait_link_inactive(struct dpc_dev *dpc) 69static void dpc_wait_link_inactive(struct dpc_dev *dpc)
70{ 70{
71 unsigned long timeout = jiffies + HZ;
72 struct pci_dev *pdev = dpc->dev->port; 71 struct pci_dev *pdev = dpc->dev->port;
73 struct device *dev = &dpc->dev->device;
74 u16 lnk_status;
75 72
76 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); 73 pcie_wait_for_link(pdev, false);
77 while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
78 !time_after(jiffies, timeout)) {
79 msleep(10);
80 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
81 }
82 if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
83 dev_warn(dev, "Link state not disabled for DPC event\n");
84} 74}
85 75
86static void dpc_work(struct work_struct *work) 76static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
87{ 77{
88 struct dpc_dev *dpc = container_of(work, struct dpc_dev, work); 78 struct dpc_dev *dpc;
89 struct pci_dev *dev, *temp, *pdev = dpc->dev->port; 79 struct pcie_device *pciedev;
90 struct pci_bus *parent = pdev->subordinate; 80 struct device *devdpc;
91 u16 cap = dpc->cap_pos, ctl; 81 u16 cap, ctl;
92 82
93 pci_lock_rescan_remove(); 83 /*
94 list_for_each_entry_safe_reverse(dev, temp, &parent->devices, 84 * DPC disables the Link automatically in hardware, so it has
95 bus_list) { 85 * already been reset by the time we get here.
96 pci_dev_get(dev); 86 */
97 pci_dev_set_disconnected(dev, NULL); 87 devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
98 if (pci_has_subordinate(dev)) 88 pciedev = to_pcie_device(devdpc);
99 pci_walk_bus(dev->subordinate, 89 dpc = get_service_data(pciedev);
100 pci_dev_set_disconnected, NULL); 90 cap = dpc->cap_pos;
101 pci_stop_and_remove_bus_device(dev); 91
102 pci_dev_put(dev); 92 /*
103 } 93 * Wait until the Link is inactive, then clear DPC Trigger Status
104 pci_unlock_rescan_remove(); 94 * to allow the Port to leave DPC.
105 95 */
106 dpc_wait_link_inactive(dpc); 96 dpc_wait_link_inactive(dpc);
97
107 if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc)) 98 if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
108 return; 99 return PCI_ERS_RESULT_DISCONNECT;
109 if (dpc->rp_extensions && dpc->rp_pio_status) { 100 if (dpc->rp_extensions && dpc->rp_pio_status) {
110 pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, 101 pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
111 dpc->rp_pio_status); 102 dpc->rp_pio_status);
@@ -113,11 +104,22 @@ static void dpc_work(struct work_struct *work)
113 } 104 }
114 105
115 pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS, 106 pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
116 PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT); 107 PCI_EXP_DPC_STATUS_TRIGGER);
117 108
118 pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl); 109 pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
119 pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL, 110 pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
120 ctl | PCI_EXP_DPC_CTL_INT_EN); 111 ctl | PCI_EXP_DPC_CTL_INT_EN);
112
113 return PCI_ERS_RESULT_RECOVERED;
114}
115
116static void dpc_work(struct work_struct *work)
117{
118 struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
119 struct pci_dev *pdev = dpc->dev->port;
120
121 /* We configure DPC so it only triggers on ERR_FATAL */
122 pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
121} 123}
122 124
123static void dpc_process_rp_pio_error(struct dpc_dev *dpc) 125static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
@@ -223,6 +225,9 @@ static irqreturn_t dpc_irq(int irq, void *context)
223 if (dpc->rp_extensions && reason == 3 && ext_reason == 0) 225 if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
224 dpc_process_rp_pio_error(dpc); 226 dpc_process_rp_pio_error(dpc);
225 227
228 pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
229 PCI_EXP_DPC_STATUS_INTERRUPT);
230
226 schedule_work(&dpc->work); 231 schedule_work(&dpc->work);
227 232
228 return IRQ_HANDLED; 233 return IRQ_HANDLED;
@@ -270,7 +275,7 @@ static int dpc_probe(struct pcie_device *dev)
270 } 275 }
271 } 276 }
272 277
273 ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; 278 ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
274 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); 279 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
275 280
276 dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", 281 dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -288,7 +293,7 @@ static void dpc_remove(struct pcie_device *dev)
288 u16 ctl; 293 u16 ctl;
289 294
290 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); 295 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
291 ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN); 296 ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
292 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); 297 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
293} 298}
294 299
@@ -298,6 +303,7 @@ static struct pcie_port_service_driver dpcdriver = {
298 .service = PCIE_PORT_SERVICE_DPC, 303 .service = PCIE_PORT_SERVICE_DPC,
299 .probe = dpc_probe, 304 .probe = dpc_probe,
300 .remove = dpc_remove, 305 .remove = dpc_remove,
306 .reset_link = dpc_reset_link,
301}; 307};
302 308
303static int __init dpc_service_init(void) 309static int __init dpc_service_init(void)
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
new file mode 100644
index 000000000000..f7ce0cb0b0b7
--- /dev/null
+++ b/drivers/pci/pcie/err.c
@@ -0,0 +1,388 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This file implements the error recovery as a core part of PCIe error
4 * reporting. When a PCIe error is delivered, an error message will be
5 * collected and printed to console, then, an error recovery procedure
6 * will be executed by following the PCI error recovery rules.
7 *
8 * Copyright (C) 2006 Intel Corp.
9 * Tom Long Nguyen (tom.l.nguyen@intel.com)
10 * Zhang Yanmin (yanmin.zhang@intel.com)
11 */
12
13#include <linux/pci.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/aer.h>
19#include "portdrv.h"
20#include "../pci.h"
21
22struct aer_broadcast_data {
23 enum pci_channel_state state;
24 enum pci_ers_result result;
25};
26
27static pci_ers_result_t merge_result(enum pci_ers_result orig,
28 enum pci_ers_result new)
29{
30 if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
31 return PCI_ERS_RESULT_NO_AER_DRIVER;
32
33 if (new == PCI_ERS_RESULT_NONE)
34 return orig;
35
36 switch (orig) {
37 case PCI_ERS_RESULT_CAN_RECOVER:
38 case PCI_ERS_RESULT_RECOVERED:
39 orig = new;
40 break;
41 case PCI_ERS_RESULT_DISCONNECT:
42 if (new == PCI_ERS_RESULT_NEED_RESET)
43 orig = PCI_ERS_RESULT_NEED_RESET;
44 break;
45 default:
46 break;
47 }
48
49 return orig;
50}
51
52static int report_error_detected(struct pci_dev *dev, void *data)
53{
54 pci_ers_result_t vote;
55 const struct pci_error_handlers *err_handler;
56 struct aer_broadcast_data *result_data;
57
58 result_data = (struct aer_broadcast_data *) data;
59
60 device_lock(&dev->dev);
61 dev->error_state = result_data->state;
62
63 if (!dev->driver ||
64 !dev->driver->err_handler ||
65 !dev->driver->err_handler->error_detected) {
66 if (result_data->state == pci_channel_io_frozen &&
67 dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
68 /*
69 * In case of fatal recovery, if one of down-
70 * stream device has no driver. We might be
71 * unable to recover because a later insmod
72 * of a driver for this device is unaware of
73 * its hw state.
74 */
75 pci_printk(KERN_DEBUG, dev, "device has %s\n",
76 dev->driver ?
77 "no AER-aware driver" : "no driver");
78 }
79
80 /*
81 * If there's any device in the subtree that does not
82 * have an error_detected callback, returning
83 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
84 * the subsequent mmio_enabled/slot_reset/resume
85 * callbacks of "any" device in the subtree. All the
86 * devices in the subtree are left in the error state
87 * without recovery.
88 */
89
90 if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
91 vote = PCI_ERS_RESULT_NO_AER_DRIVER;
92 else
93 vote = PCI_ERS_RESULT_NONE;
94 } else {
95 err_handler = dev->driver->err_handler;
96 vote = err_handler->error_detected(dev, result_data->state);
97 pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
98 }
99
100 result_data->result = merge_result(result_data->result, vote);
101 device_unlock(&dev->dev);
102 return 0;
103}
104
105static int report_mmio_enabled(struct pci_dev *dev, void *data)
106{
107 pci_ers_result_t vote;
108 const struct pci_error_handlers *err_handler;
109 struct aer_broadcast_data *result_data;
110
111 result_data = (struct aer_broadcast_data *) data;
112
113 device_lock(&dev->dev);
114 if (!dev->driver ||
115 !dev->driver->err_handler ||
116 !dev->driver->err_handler->mmio_enabled)
117 goto out;
118
119 err_handler = dev->driver->err_handler;
120 vote = err_handler->mmio_enabled(dev);
121 result_data->result = merge_result(result_data->result, vote);
122out:
123 device_unlock(&dev->dev);
124 return 0;
125}
126
127static int report_slot_reset(struct pci_dev *dev, void *data)
128{
129 pci_ers_result_t vote;
130 const struct pci_error_handlers *err_handler;
131 struct aer_broadcast_data *result_data;
132
133 result_data = (struct aer_broadcast_data *) data;
134
135 device_lock(&dev->dev);
136 if (!dev->driver ||
137 !dev->driver->err_handler ||
138 !dev->driver->err_handler->slot_reset)
139 goto out;
140
141 err_handler = dev->driver->err_handler;
142 vote = err_handler->slot_reset(dev);
143 result_data->result = merge_result(result_data->result, vote);
144out:
145 device_unlock(&dev->dev);
146 return 0;
147}
148
149static int report_resume(struct pci_dev *dev, void *data)
150{
151 const struct pci_error_handlers *err_handler;
152
153 device_lock(&dev->dev);
154 dev->error_state = pci_channel_io_normal;
155
156 if (!dev->driver ||
157 !dev->driver->err_handler ||
158 !dev->driver->err_handler->resume)
159 goto out;
160
161 err_handler = dev->driver->err_handler;
162 err_handler->resume(dev);
163 pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
164out:
165 device_unlock(&dev->dev);
166 return 0;
167}
168
169/**
170 * default_reset_link - default reset function
171 * @dev: pointer to pci_dev data structure
172 *
173 * Invoked when performing link reset on a Downstream Port or a
174 * Root Port with no aer driver.
175 */
176static pci_ers_result_t default_reset_link(struct pci_dev *dev)
177{
178 pci_reset_bridge_secondary_bus(dev);
179 pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
180 return PCI_ERS_RESULT_RECOVERED;
181}
182
183static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
184{
185 struct pci_dev *udev;
186 pci_ers_result_t status;
187 struct pcie_port_service_driver *driver = NULL;
188
189 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
190 /* Reset this port for all subordinates */
191 udev = dev;
192 } else {
193 /* Reset the upstream component (likely downstream port) */
194 udev = dev->bus->self;
195 }
196
197 /* Use the aer driver of the component firstly */
198 driver = pcie_port_find_service(udev, service);
199
200 if (driver && driver->reset_link) {
201 status = driver->reset_link(udev);
202 } else if (udev->has_secondary_link) {
203 status = default_reset_link(udev);
204 } else {
205 pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
206 pci_name(udev));
207 return PCI_ERS_RESULT_DISCONNECT;
208 }
209
210 if (status != PCI_ERS_RESULT_RECOVERED) {
211 pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
212 pci_name(udev));
213 return PCI_ERS_RESULT_DISCONNECT;
214 }
215
216 return status;
217}
218
219/**
220 * broadcast_error_message - handle message broadcast to downstream drivers
221 * @dev: pointer to from where in a hierarchy message is broadcasted down
222 * @state: error state
223 * @error_mesg: message to print
224 * @cb: callback to be broadcasted
225 *
226 * Invoked during error recovery process. Once being invoked, the content
227 * of error severity will be broadcasted to all downstream drivers in a
228 * hierarchy in question.
229 */
230static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
231 enum pci_channel_state state,
232 char *error_mesg,
233 int (*cb)(struct pci_dev *, void *))
234{
235 struct aer_broadcast_data result_data;
236
237 pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
238 result_data.state = state;
239 if (cb == report_error_detected)
240 result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
241 else
242 result_data.result = PCI_ERS_RESULT_RECOVERED;
243
244 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
245 /*
246 * If the error is reported by a bridge, we think this error
247 * is related to the downstream link of the bridge, so we
248 * do error recovery on all subordinates of the bridge instead
249 * of the bridge and clear the error status of the bridge.
250 */
251 if (cb == report_error_detected)
252 dev->error_state = state;
253 pci_walk_bus(dev->subordinate, cb, &result_data);
254 if (cb == report_resume) {
255 pci_cleanup_aer_uncorrect_error_status(dev);
256 dev->error_state = pci_channel_io_normal;
257 }
258 } else {
259 /*
260 * If the error is reported by an end point, we think this
261 * error is related to the upstream link of the end point.
262 */
263 if (state == pci_channel_io_normal)
264 /*
265 * the error is non fatal so the bus is ok, just invoke
266 * the callback for the function that logged the error.
267 */
268 cb(dev, &result_data);
269 else
270 pci_walk_bus(dev->bus, cb, &result_data);
271 }
272
273 return result_data.result;
274}
275
276/**
277 * pcie_do_fatal_recovery - handle fatal error recovery process
278 * @dev: pointer to a pci_dev data structure of agent detecting an error
279 *
280 * Invoked when an error is fatal. Once being invoked, removes the devices
281 * beneath this AER agent, followed by reset link e.g. secondary bus reset
282 * followed by re-enumeration of devices.
283 */
284void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
285{
286 struct pci_dev *udev;
287 struct pci_bus *parent;
288 struct pci_dev *pdev, *temp;
289 pci_ers_result_t result;
290
291 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
292 udev = dev;
293 else
294 udev = dev->bus->self;
295
296 parent = udev->subordinate;
297 pci_lock_rescan_remove();
298 list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
299 bus_list) {
300 pci_dev_get(pdev);
301 pci_dev_set_disconnected(pdev, NULL);
302 if (pci_has_subordinate(pdev))
303 pci_walk_bus(pdev->subordinate,
304 pci_dev_set_disconnected, NULL);
305 pci_stop_and_remove_bus_device(pdev);
306 pci_dev_put(pdev);
307 }
308
309 result = reset_link(udev, service);
310
311 if ((service == PCIE_PORT_SERVICE_AER) &&
312 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
313 /*
314 * If the error is reported by a bridge, we think this error
315 * is related to the downstream link of the bridge, so we
316 * do error recovery on all subordinates of the bridge instead
317 * of the bridge and clear the error status of the bridge.
318 */
319 pci_cleanup_aer_uncorrect_error_status(dev);
320 }
321
322 if (result == PCI_ERS_RESULT_RECOVERED) {
323 if (pcie_wait_for_link(udev, true))
324 pci_rescan_bus(udev->bus);
325 pci_info(dev, "Device recovery from fatal error successful\n");
326 } else {
327 pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
328 pci_info(dev, "Device recovery from fatal error failed\n");
329 }
330
331 pci_unlock_rescan_remove();
332}
333
334/**
335 * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
336 * @dev: pointer to a pci_dev data structure of agent detecting an error
337 *
338 * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
339 * error detected message to all downstream drivers within a hierarchy in
340 * question and return the returned code.
341 */
342void pcie_do_nonfatal_recovery(struct pci_dev *dev)
343{
344 pci_ers_result_t status;
345 enum pci_channel_state state;
346
347 state = pci_channel_io_normal;
348
349 status = broadcast_error_message(dev,
350 state,
351 "error_detected",
352 report_error_detected);
353
354 if (status == PCI_ERS_RESULT_CAN_RECOVER)
355 status = broadcast_error_message(dev,
356 state,
357 "mmio_enabled",
358 report_mmio_enabled);
359
360 if (status == PCI_ERS_RESULT_NEED_RESET) {
361 /*
362 * TODO: Should call platform-specific
363 * functions to reset slot before calling
364 * drivers' slot_reset callbacks?
365 */
366 status = broadcast_error_message(dev,
367 state,
368 "slot_reset",
369 report_slot_reset);
370 }
371
372 if (status != PCI_ERS_RESULT_RECOVERED)
373 goto failed;
374
375 broadcast_error_message(dev,
376 state,
377 "resume",
378 report_resume);
379
380 pci_info(dev, "AER: Device recovery successful\n");
381 return;
382
383failed:
384 pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
385
386 /* TODO: Should kernel panic here? */
387 pci_info(dev, "AER: Device recovery failed\n");
388}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d0c6783dbfe3..2bb5db7b53e6 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,8 +11,6 @@
11 11
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13 13
14extern bool pcie_ports_native;
15
16/* Service Type */ 14/* Service Type */
17#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ 15#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
18#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) 16#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
@@ -112,4 +110,7 @@ static inline bool pcie_pme_no_msi(void) { return false; }
112static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {} 110static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
113#endif /* !CONFIG_PCIE_PME */ 111#endif /* !CONFIG_PCIE_PME */
114 112
113struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
114 u32 service);
115struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
115#endif /* _PORTDRV_H_ */ 116#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
deleted file mode 100644
index 8ab5d434b9c6..000000000000
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ /dev/null
@@ -1,57 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe Port Native Services Support, ACPI-Related Part
4 *
5 * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6 */
7
8#include <linux/pci.h>
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/acpi.h>
12#include <linux/pci-acpi.h>
13
14#include "aer/aerdrv.h"
15#include "../pci.h"
16#include "portdrv.h"
17
18/**
19 * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
20 * @port: PCIe Port service for a root port or event collector.
21 * @srv_mask: Bit mask of services that can be enabled for @port.
22 *
23 * Invoked when @port is identified as a PCIe port device. To avoid conflicts
24 * with the BIOS PCIe port native services support requires the BIOS to yield
25 * control of these services to the kernel. The mask of services that the BIOS
26 * allows to be enabled for @port is written to @srv_mask.
27 *
28 * NOTE: It turns out that we cannot do that for individual port services
29 * separately, because that would make some systems work incorrectly.
30 */
31void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
32{
33 struct acpi_pci_root *root;
34 acpi_handle handle;
35 u32 flags;
36
37 if (acpi_pci_disabled)
38 return;
39
40 handle = acpi_find_root_bridge_handle(port);
41 if (!handle)
42 return;
43
44 root = acpi_pci_find_root(handle);
45 if (!root)
46 return;
47
48 flags = root->osc_control_set;
49
50 *srv_mask = 0;
51 if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
52 *srv_mask |= PCIE_PORT_SERVICE_HP;
53 if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
54 *srv_mask |= PCIE_PORT_SERVICE_PME;
55 if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
56 *srv_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
57}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index c9c0663db282..e0261ad4bcdd 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -19,6 +19,12 @@
19#include "../pci.h" 19#include "../pci.h"
20#include "portdrv.h" 20#include "portdrv.h"
21 21
22struct portdrv_service_data {
23 struct pcie_port_service_driver *drv;
24 struct device *dev;
25 u32 service;
26};
27
22/** 28/**
23 * release_pcie_device - free PCI Express port service device structure 29 * release_pcie_device - free PCI Express port service device structure
24 * @dev: Port service device to release 30 * @dev: Port service device to release
@@ -199,7 +205,7 @@ static int get_port_device_capability(struct pci_dev *dev)
199 int services = 0; 205 int services = 0;
200 206
201 if (dev->is_hotplug_bridge && 207 if (dev->is_hotplug_bridge &&
202 (pcie_ports_native || host->native_hotplug)) { 208 (pcie_ports_native || host->native_pcie_hotplug)) {
203 services |= PCIE_PORT_SERVICE_HP; 209 services |= PCIE_PORT_SERVICE_HP;
204 210
205 /* 211 /*
@@ -398,6 +404,69 @@ static int remove_iter(struct device *dev, void *data)
398 return 0; 404 return 0;
399} 405}
400 406
407static int find_service_iter(struct device *device, void *data)
408{
409 struct pcie_port_service_driver *service_driver;
410 struct portdrv_service_data *pdrvs;
411 u32 service;
412
413 pdrvs = (struct portdrv_service_data *) data;
414 service = pdrvs->service;
415
416 if (device->bus == &pcie_port_bus_type && device->driver) {
417 service_driver = to_service_driver(device->driver);
418 if (service_driver->service == service) {
419 pdrvs->drv = service_driver;
420 pdrvs->dev = device;
421 return 1;
422 }
423 }
424
425 return 0;
426}
427
428/**
429 * pcie_port_find_service - find the service driver
430 * @dev: PCI Express port the service is associated with
431 * @service: Service to find
432 *
433 * Find PCI Express port service driver associated with given service
434 */
435struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
436 u32 service)
437{
438 struct pcie_port_service_driver *drv;
439 struct portdrv_service_data pdrvs;
440
441 pdrvs.drv = NULL;
442 pdrvs.service = service;
443 device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
444
445 drv = pdrvs.drv;
446 return drv;
447}
448
449/**
450 * pcie_port_find_device - find the struct device
451 * @dev: PCI Express port the service is associated with
452 * @service: For the service to find
453 *
454 * Find the struct device associated with given service on a pci_dev
455 */
456struct device *pcie_port_find_device(struct pci_dev *dev,
457 u32 service)
458{
459 struct device *device;
460 struct portdrv_service_data pdrvs;
461
462 pdrvs.dev = NULL;
463 pdrvs.service = service;
464 device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
465
466 device = pdrvs.dev;
467 return device;
468}
469
401/** 470/**
402 * pcie_port_device_remove - unregister PCI Express port service devices 471 * pcie_port_device_remove - unregister PCI Express port service devices
403 * @dev: PCI Express port the service devices to unregister are associated with 472 * @dev: PCI Express port the service devices to unregister are associated with
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac91b6fd0bcd..ac876e32de4b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -526,12 +526,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
526 526
527 if (bridge->release_fn) 527 if (bridge->release_fn)
528 bridge->release_fn(bridge); 528 bridge->release_fn(bridge);
529
530 pci_free_resource_list(&bridge->windows);
529} 531}
530 532
531static void pci_release_host_bridge_dev(struct device *dev) 533static void pci_release_host_bridge_dev(struct device *dev)
532{ 534{
533 devm_pci_release_host_bridge_dev(dev); 535 devm_pci_release_host_bridge_dev(dev);
534 pci_free_host_bridge(to_pci_host_bridge(dev)); 536 kfree(to_pci_host_bridge(dev));
535} 537}
536 538
537struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) 539struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -552,8 +554,10 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
552 * OS from interfering. 554 * OS from interfering.
553 */ 555 */
554 bridge->native_aer = 1; 556 bridge->native_aer = 1;
555 bridge->native_hotplug = 1; 557 bridge->native_pcie_hotplug = 1;
558 bridge->native_shpc_hotplug = 1;
556 bridge->native_pme = 1; 559 bridge->native_pme = 1;
560 bridge->native_ltr = 1;
557 561
558 return bridge; 562 return bridge;
559} 563}
@@ -882,6 +886,45 @@ free:
882 return err; 886 return err;
883} 887}
884 888
889static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
890{
891 int pos;
892 u32 status;
893
894 /*
895 * If extended config space isn't accessible on a bridge's primary
896 * bus, we certainly can't access it on the secondary bus.
897 */
898 if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
899 return false;
900
901 /*
902 * PCIe Root Ports and switch ports are PCIe on both sides, so if
903 * extended config space is accessible on the primary, it's also
904 * accessible on the secondary.
905 */
906 if (pci_is_pcie(bridge) &&
907 (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
908 pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
909 pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
910 return true;
911
912 /*
913 * For the other bridge types:
914 * - PCI-to-PCI bridges
915 * - PCIe-to-PCI/PCI-X forward bridges
916 * - PCI/PCI-X-to-PCIe reverse bridges
917 * extended config space on the secondary side is only accessible
918 * if the bridge supports PCI-X Mode 2.
919 */
920 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
921 if (!pos)
922 return false;
923
924 pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
925 return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
926}
927
885static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, 928static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
886 struct pci_dev *bridge, int busnr) 929 struct pci_dev *bridge, int busnr)
887{ 930{
@@ -923,6 +966,16 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
923 pci_set_bus_of_node(child); 966 pci_set_bus_of_node(child);
924 pci_set_bus_speed(child); 967 pci_set_bus_speed(child);
925 968
969 /*
970 * Check whether extended config space is accessible on the child
971 * bus. Note that we currently assume it is always accessible on
972 * the root bus.
973 */
974 if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
975 child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
976 pci_info(child, "extended config space not accessible\n");
977 }
978
926 /* Set up default resource pointers and names */ 979 /* Set up default resource pointers and names */
927 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { 980 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
928 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; 981 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -998,6 +1051,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
998 * already configured by the BIOS and after we are done with all of 1051 * already configured by the BIOS and after we are done with all of
999 * them, we proceed to assigning numbers to the remaining buses in 1052 * them, we proceed to assigning numbers to the remaining buses in
1000 * order to avoid overlaps between old and new bus numbers. 1053 * order to avoid overlaps between old and new bus numbers.
1054 *
1055 * Return: New subordinate number covering all buses behind this bridge.
1001 */ 1056 */
1002static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, 1057static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1003 int max, unsigned int available_buses, 1058 int max, unsigned int available_buses,
@@ -1188,20 +1243,15 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
1188 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"), 1243 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1189 pci_domain_nr(bus), child->number); 1244 pci_domain_nr(bus), child->number);
1190 1245
1191 /* Has only triggered on CardBus, fixup is in yenta_socket */ 1246 /* Check that all devices are accessible */
1192 while (bus->parent) { 1247 while (bus->parent) {
1193 if ((child->busn_res.end > bus->busn_res.end) || 1248 if ((child->busn_res.end > bus->busn_res.end) ||
1194 (child->number > bus->busn_res.end) || 1249 (child->number > bus->busn_res.end) ||
1195 (child->number < bus->number) || 1250 (child->number < bus->number) ||
1196 (child->busn_res.end < bus->number)) { 1251 (child->busn_res.end < bus->number)) {
1197 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n", 1252 dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
1198 &child->busn_res, 1253 &child->busn_res);
1199 (bus->number > child->busn_res.end && 1254 break;
1200 bus->busn_res.end < child->number) ?
1201 "wholly" : "partially",
1202 bus->self->transparent ? " transparent" : "",
1203 dev_name(&bus->dev),
1204 &bus->busn_res);
1205 } 1255 }
1206 bus = bus->parent; 1256 bus = bus->parent;
1207 } 1257 }
@@ -1230,6 +1280,8 @@ out:
1230 * already configured by the BIOS and after we are done with all of 1280 * already configured by the BIOS and after we are done with all of
1231 * them, we proceed to assigning numbers to the remaining buses in 1281 * them, we proceed to assigning numbers to the remaining buses in
1232 * order to avoid overlaps between old and new bus numbers. 1282 * order to avoid overlaps between old and new bus numbers.
1283 *
1284 * Return: New subordinate number covering all buses behind this bridge.
1233 */ 1285 */
1234int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) 1286int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
1235{ 1287{
@@ -1393,6 +1445,9 @@ int pci_cfg_space_size(struct pci_dev *dev)
1393 u32 status; 1445 u32 status;
1394 u16 class; 1446 u16 class;
1395 1447
1448 if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
1449 return PCI_CFG_SPACE_SIZE;
1450
1396 class = dev->class >> 8; 1451 class = dev->class >> 8;
1397 if (class == PCI_CLASS_BRIDGE_HOST) 1452 if (class == PCI_CLASS_BRIDGE_HOST)
1398 return pci_cfg_space_size_ext(dev); 1453 return pci_cfg_space_size_ext(dev);
@@ -1954,9 +2009,13 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1954static void pci_configure_ltr(struct pci_dev *dev) 2009static void pci_configure_ltr(struct pci_dev *dev)
1955{ 2010{
1956#ifdef CONFIG_PCIEASPM 2011#ifdef CONFIG_PCIEASPM
2012 struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
1957 u32 cap; 2013 u32 cap;
1958 struct pci_dev *bridge; 2014 struct pci_dev *bridge;
1959 2015
2016 if (!host->native_ltr)
2017 return;
2018
1960 if (!pci_is_pcie(dev)) 2019 if (!pci_is_pcie(dev))
1961 return; 2020 return;
1962 2021
@@ -2638,7 +2697,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2638 for_each_pci_bridge(dev, bus) { 2697 for_each_pci_bridge(dev, bus) {
2639 cmax = max; 2698 cmax = max;
2640 max = pci_scan_bridge_extend(bus, dev, max, 0, 0); 2699 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
2641 used_buses += cmax - max; 2700
2701 /*
2702 * Reserve one bus for each bridge now to avoid extending
2703 * hotplug bridges too much during the second scan below.
2704 */
2705 used_buses++;
2706 if (cmax - max > 1)
2707 used_buses += cmax - max - 1;
2642 } 2708 }
2643 2709
2644 /* Scan bridges that need to be reconfigured */ 2710 /* Scan bridges that need to be reconfigured */
@@ -2661,12 +2727,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2661 * bridges if any. 2727 * bridges if any.
2662 */ 2728 */
2663 buses = available_buses / hotplug_bridges; 2729 buses = available_buses / hotplug_bridges;
2664 buses = min(buses, available_buses - used_buses); 2730 buses = min(buses, available_buses - used_buses + 1);
2665 } 2731 }
2666 2732
2667 cmax = max; 2733 cmax = max;
2668 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1); 2734 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
2669 used_buses += max - cmax; 2735 /* One bus is already accounted so don't add it again */
2736 if (max - cmax > 1)
2737 used_buses += max - cmax - 1;
2670 } 2738 }
2671 2739
2672 /* 2740 /*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2990ad1e7c99..f439de848658 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -30,6 +30,162 @@
30#include <asm/dma.h> /* isa_dma_bridge_buggy */ 30#include <asm/dma.h> /* isa_dma_bridge_buggy */
31#include "pci.h" 31#include "pci.h"
32 32
33static ktime_t fixup_debug_start(struct pci_dev *dev,
34 void (*fn)(struct pci_dev *dev))
35{
36 if (initcall_debug)
37 pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
38
39 return ktime_get();
40}
41
42static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
43 void (*fn)(struct pci_dev *dev))
44{
45 ktime_t delta, rettime;
46 unsigned long long duration;
47
48 rettime = ktime_get();
49 delta = ktime_sub(rettime, calltime);
50 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
51 if (initcall_debug || duration > 10000)
52 pci_info(dev, "%pF took %lld usecs\n", fn, duration);
53}
54
55static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
56 struct pci_fixup *end)
57{
58 ktime_t calltime;
59
60 for (; f < end; f++)
61 if ((f->class == (u32) (dev->class >> f->class_shift) ||
62 f->class == (u32) PCI_ANY_ID) &&
63 (f->vendor == dev->vendor ||
64 f->vendor == (u16) PCI_ANY_ID) &&
65 (f->device == dev->device ||
66 f->device == (u16) PCI_ANY_ID)) {
67 calltime = fixup_debug_start(dev, f->hook);
68 f->hook(dev);
69 fixup_debug_report(dev, calltime, f->hook);
70 }
71}
72
73extern struct pci_fixup __start_pci_fixups_early[];
74extern struct pci_fixup __end_pci_fixups_early[];
75extern struct pci_fixup __start_pci_fixups_header[];
76extern struct pci_fixup __end_pci_fixups_header[];
77extern struct pci_fixup __start_pci_fixups_final[];
78extern struct pci_fixup __end_pci_fixups_final[];
79extern struct pci_fixup __start_pci_fixups_enable[];
80extern struct pci_fixup __end_pci_fixups_enable[];
81extern struct pci_fixup __start_pci_fixups_resume[];
82extern struct pci_fixup __end_pci_fixups_resume[];
83extern struct pci_fixup __start_pci_fixups_resume_early[];
84extern struct pci_fixup __end_pci_fixups_resume_early[];
85extern struct pci_fixup __start_pci_fixups_suspend[];
86extern struct pci_fixup __end_pci_fixups_suspend[];
87extern struct pci_fixup __start_pci_fixups_suspend_late[];
88extern struct pci_fixup __end_pci_fixups_suspend_late[];
89
90static bool pci_apply_fixup_final_quirks;
91
92void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
93{
94 struct pci_fixup *start, *end;
95
96 switch (pass) {
97 case pci_fixup_early:
98 start = __start_pci_fixups_early;
99 end = __end_pci_fixups_early;
100 break;
101
102 case pci_fixup_header:
103 start = __start_pci_fixups_header;
104 end = __end_pci_fixups_header;
105 break;
106
107 case pci_fixup_final:
108 if (!pci_apply_fixup_final_quirks)
109 return;
110 start = __start_pci_fixups_final;
111 end = __end_pci_fixups_final;
112 break;
113
114 case pci_fixup_enable:
115 start = __start_pci_fixups_enable;
116 end = __end_pci_fixups_enable;
117 break;
118
119 case pci_fixup_resume:
120 start = __start_pci_fixups_resume;
121 end = __end_pci_fixups_resume;
122 break;
123
124 case pci_fixup_resume_early:
125 start = __start_pci_fixups_resume_early;
126 end = __end_pci_fixups_resume_early;
127 break;
128
129 case pci_fixup_suspend:
130 start = __start_pci_fixups_suspend;
131 end = __end_pci_fixups_suspend;
132 break;
133
134 case pci_fixup_suspend_late:
135 start = __start_pci_fixups_suspend_late;
136 end = __end_pci_fixups_suspend_late;
137 break;
138
139 default:
140 /* stupid compiler warning, you would think with an enum... */
141 return;
142 }
143 pci_do_fixups(dev, start, end);
144}
145EXPORT_SYMBOL(pci_fixup_device);
146
147static int __init pci_apply_final_quirks(void)
148{
149 struct pci_dev *dev = NULL;
150 u8 cls = 0;
151 u8 tmp;
152
153 if (pci_cache_line_size)
154 printk(KERN_DEBUG "PCI: CLS %u bytes\n",
155 pci_cache_line_size << 2);
156
157 pci_apply_fixup_final_quirks = true;
158 for_each_pci_dev(dev) {
159 pci_fixup_device(pci_fixup_final, dev);
160 /*
161 * If arch hasn't set it explicitly yet, use the CLS
162 * value shared by all PCI devices. If there's a
163 * mismatch, fall back to the default value.
164 */
165 if (!pci_cache_line_size) {
166 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
167 if (!cls)
168 cls = tmp;
169 if (!tmp || cls == tmp)
170 continue;
171
172 printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
173 cls << 2, tmp << 2,
174 pci_dfl_cache_line_size << 2);
175 pci_cache_line_size = pci_dfl_cache_line_size;
176 }
177 }
178
179 if (!pci_cache_line_size) {
180 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
181 cls << 2, pci_dfl_cache_line_size << 2);
182 pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
183 }
184
185 return 0;
186}
187fs_initcall_sync(pci_apply_final_quirks);
188
33/* 189/*
34 * Decoding should be disabled for a PCI device during BAR sizing to avoid 190 * Decoding should be disabled for a PCI device during BAR sizing to avoid
35 * conflict. But doing so may cause problems on host bridge and perhaps other 191 * conflict. But doing so may cause problems on host bridge and perhaps other
@@ -43,9 +199,10 @@ static void quirk_mmio_always_on(struct pci_dev *dev)
43DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, 199DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
44 PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); 200 PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
45 201
46/* The Mellanox Tavor device gives false positive parity errors 202/*
47 * Mark this device with a broken_parity_status, to allow 203 * The Mellanox Tavor device gives false positive parity errors. Mark this
48 * PCI scanning code to "skip" this now blacklisted device. 204 * device with a broken_parity_status to allow PCI scanning code to "skip"
205 * this now blacklisted device.
49 */ 206 */
50static void quirk_mellanox_tavor(struct pci_dev *dev) 207static void quirk_mellanox_tavor(struct pci_dev *dev)
51{ 208{
@@ -54,15 +211,19 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
54DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor); 211DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
55DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor); 212DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
56 213
57/* Deal with broken BIOSes that neglect to enable passive release, 214/*
58 which can cause problems in combination with the 82441FX/PPro MTRRs */ 215 * Deal with broken BIOSes that neglect to enable passive release,
216 * which can cause problems in combination with the 82441FX/PPro MTRRs
217 */
59static void quirk_passive_release(struct pci_dev *dev) 218static void quirk_passive_release(struct pci_dev *dev)
60{ 219{
61 struct pci_dev *d = NULL; 220 struct pci_dev *d = NULL;
62 unsigned char dlc; 221 unsigned char dlc;
63 222
64 /* We have to make sure a particular bit is set in the PIIX3 223 /*
65 ISA bridge, so we have to go out and find it. */ 224 * We have to make sure a particular bit is set in the PIIX3
225 * ISA bridge, so we have to go out and find it.
226 */
66 while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { 227 while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
67 pci_read_config_byte(d, 0x82, &dlc); 228 pci_read_config_byte(d, 0x82, &dlc);
68 if (!(dlc & 1<<1)) { 229 if (!(dlc & 1<<1)) {
@@ -75,13 +236,14 @@ static void quirk_passive_release(struct pci_dev *dev)
75DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); 236DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
76DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); 237DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
77 238
78/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround 239/*
79 but VIA don't answer queries. If you happen to have good contacts at VIA 240 * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
80 ask them for me please -- Alan 241 * workaround but VIA don't answer queries. If you happen to have good
81 242 * contacts at VIA ask them for me please -- Alan
82 This appears to be BIOS not version dependent. So presumably there is a 243 *
83 chipset level fix */ 244 * This appears to be BIOS not version dependent. So presumably there is a
84 245 * chipset level fix.
246 */
85static void quirk_isa_dma_hangs(struct pci_dev *dev) 247static void quirk_isa_dma_hangs(struct pci_dev *dev)
86{ 248{
87 if (!isa_dma_bridge_buggy) { 249 if (!isa_dma_bridge_buggy) {
@@ -89,10 +251,10 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
89 pci_info(dev, "Activating ISA DMA hang workarounds\n"); 251 pci_info(dev, "Activating ISA DMA hang workarounds\n");
90 } 252 }
91} 253}
92 /* 254/*
93 * Its not totally clear which chipsets are the problematic ones 255 * It's not totally clear which chipsets are the problematic ones. We know
94 * We know 82C586 and 82C596 variants are affected. 256 * 82C586 and 82C596 variants are affected.
95 */ 257 */
96DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); 258DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
97DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); 259DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
98DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); 260DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
@@ -121,9 +283,7 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
121} 283}
122DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); 284DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
123 285
124/* 286/* Chipsets where PCI->PCI transfers vanish or hang */
125 * Chipsets where PCI->PCI transfers vanish or hang
126 */
127static void quirk_nopcipci(struct pci_dev *dev) 287static void quirk_nopcipci(struct pci_dev *dev)
128{ 288{
129 if ((pci_pci_problems & PCIPCI_FAIL) == 0) { 289 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
@@ -146,9 +306,7 @@ static void quirk_nopciamd(struct pci_dev *dev)
146} 306}
147DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd); 307DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
148 308
149/* 309/* Triton requires workarounds to be used by the drivers */
150 * Triton requires workarounds to be used by the drivers
151 */
152static void quirk_triton(struct pci_dev *dev) 310static void quirk_triton(struct pci_dev *dev)
153{ 311{
154 if ((pci_pci_problems&PCIPCI_TRITON) == 0) { 312 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
@@ -162,53 +320,62 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_tr
162DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); 320DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
163 321
164/* 322/*
165 * VIA Apollo KT133 needs PCI latency patch 323 * VIA Apollo KT133 needs PCI latency patch
166 * Made according to a windows driver based patch by George E. Breese 324 * Made according to a Windows driver-based patch by George E. Breese;
167 * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm 325 * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
168 * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for 326 * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for the info on
169 * the info on which Mr Breese based his work. 327 * which Mr Breese based his work.
170 * 328 *
171 * Updated based on further information from the site and also on 329 * Updated based on further information from the site and also on
172 * information provided by VIA 330 * information provided by VIA
173 */ 331 */
174static void quirk_vialatency(struct pci_dev *dev) 332static void quirk_vialatency(struct pci_dev *dev)
175{ 333{
176 struct pci_dev *p; 334 struct pci_dev *p;
177 u8 busarb; 335 u8 busarb;
178 /* Ok we have a potential problem chipset here. Now see if we have
179 a buggy southbridge */
180 336
337 /*
338 * Ok, we have a potential problem chipset here. Now see if we have
339 * a buggy southbridge.
340 */
181 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); 341 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
182 if (p != NULL) { 342 if (p != NULL) {
183 /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ 343
184 /* Check for buggy part revisions */ 344 /*
345 * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A;
346 * thanks Dan Hollis.
347 * Check for buggy part revisions
348 */
185 if (p->revision < 0x40 || p->revision > 0x42) 349 if (p->revision < 0x40 || p->revision > 0x42)
186 goto exit; 350 goto exit;
187 } else { 351 } else {
188 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); 352 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
189 if (p == NULL) /* No problem parts */ 353 if (p == NULL) /* No problem parts */
190 goto exit; 354 goto exit;
355
191 /* Check for buggy part revisions */ 356 /* Check for buggy part revisions */
192 if (p->revision < 0x10 || p->revision > 0x12) 357 if (p->revision < 0x10 || p->revision > 0x12)
193 goto exit; 358 goto exit;
194 } 359 }
195 360
196 /* 361 /*
197 * Ok we have the problem. Now set the PCI master grant to 362 * Ok we have the problem. Now set the PCI master grant to occur
198 * occur every master grant. The apparent bug is that under high 363 * every master grant. The apparent bug is that under high PCI load
199 * PCI load (quite common in Linux of course) you can get data 364 * (quite common in Linux of course) you can get data loss when the
200 * loss when the CPU is held off the bus for 3 bus master requests 365 * CPU is held off the bus for 3 bus master requests. This happens
201 * This happens to include the IDE controllers.... 366 * to include the IDE controllers....
202 * 367 *
203 * VIA only apply this fix when an SB Live! is present but under 368 * VIA only apply this fix when an SB Live! is present but under
204 * both Linux and Windows this isn't enough, and we have seen 369 * both Linux and Windows this isn't enough, and we have seen
205 * corruption without SB Live! but with things like 3 UDMA IDE 370 * corruption without SB Live! but with things like 3 UDMA IDE
206 * controllers. So we ignore that bit of the VIA recommendation.. 371 * controllers. So we ignore that bit of the VIA recommendation..
207 */ 372 */
208
209 pci_read_config_byte(dev, 0x76, &busarb); 373 pci_read_config_byte(dev, 0x76, &busarb);
210 /* Set bit 4 and bi 5 of byte 76 to 0x01 374
211 "Master priority rotation on every PCI master grant */ 375 /*
376 * Set bit 4 and bit 5 of byte 76 to 0x01
377 * "Master priority rotation on every PCI master grant"
378 */
212 busarb &= ~(1<<5); 379 busarb &= ~(1<<5);
213 busarb |= (1<<4); 380 busarb |= (1<<4);
214 pci_write_config_byte(dev, 0x76, busarb); 381 pci_write_config_byte(dev, 0x76, busarb);
@@ -224,9 +391,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vial
224DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); 391DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
225DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); 392DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
226 393
227/* 394/* VIA Apollo VP3 needs ETBF on BT848/878 */
228 * VIA Apollo VP3 needs ETBF on BT848/878
229 */
230static void quirk_viaetbf(struct pci_dev *dev) 395static void quirk_viaetbf(struct pci_dev *dev)
231{ 396{
232 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) { 397 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
@@ -246,10 +411,9 @@ static void quirk_vsfx(struct pci_dev *dev)
246DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx); 411DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
247 412
248/* 413/*
249 * Ali Magik requires workarounds to be used by the drivers 414 * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
250 * that DMA to AGP space. Latency must be set to 0xA and triton 415 * space. Latency must be set to 0xA and Triton workaround applied too.
251 * workaround applied too 416 * [Info kindly provided by ALi]
252 * [Info kindly provided by ALi]
253 */ 417 */
254static void quirk_alimagik(struct pci_dev *dev) 418static void quirk_alimagik(struct pci_dev *dev)
255{ 419{
@@ -261,10 +425,7 @@ static void quirk_alimagik(struct pci_dev *dev)
261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); 425DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
262DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); 426DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
263 427
264/* 428/* Natoma has some interesting boundary conditions with Zoran stuff at least */
265 * Natoma has some interesting boundary conditions with Zoran stuff
266 * at least
267 */
268static void quirk_natoma(struct pci_dev *dev) 429static void quirk_natoma(struct pci_dev *dev)
269{ 430{
270 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) { 431 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
@@ -280,8 +441,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quir
280DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); 441DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
281 442
282/* 443/*
283 * This chip can cause PCI parity errors if config register 0xA0 is read 444 * This chip can cause PCI parity errors if config register 0xA0 is read
284 * while DMAs are occurring. 445 * while DMAs are occurring.
285 */ 446 */
286static void quirk_citrine(struct pci_dev *dev) 447static void quirk_citrine(struct pci_dev *dev)
287{ 448{
@@ -321,8 +482,8 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
321DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page); 482DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
322 483
323/* 484/*
324 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. 485 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
325 * If it's needed, re-allocate the region. 486 * If it's needed, re-allocate the region.
326 */ 487 */
327static void quirk_s3_64M(struct pci_dev *dev) 488static void quirk_s3_64M(struct pci_dev *dev)
328{ 489{
@@ -413,8 +574,8 @@ static void quirk_io_region(struct pci_dev *dev, int port,
413} 574}
414 575
415/* 576/*
416 * ATI Northbridge setups MCE the processor if you even 577 * ATI Northbridge setups MCE the processor if you even read somewhere
417 * read somewhere between 0x3b0->0x3bb or read 0x3d3 578 * between 0x3b0->0x3bb or read 0x3d3
418 */ 579 */
419static void quirk_ati_exploding_mce(struct pci_dev *dev) 580static void quirk_ati_exploding_mce(struct pci_dev *dev)
420{ 581{
@@ -429,6 +590,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
429 * In the AMD NL platform, this device ([1022:7912]) has a class code of 590 * In the AMD NL platform, this device ([1022:7912]) has a class code of
430 * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will 591 * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
431 * claim it. 592 * claim it.
593 *
432 * But the dwc3 driver is a more specific driver for this device, and we'd 594 * But the dwc3 driver is a more specific driver for this device, and we'd
433 * prefer to use it instead of xhci. To prevent xhci from claiming the 595 * prefer to use it instead of xhci. To prevent xhci from claiming the
434 * device, change the class code to 0x0c03fe, which the PCI r3.0 spec 596 * device, change the class code to 0x0c03fe, which the PCI r3.0 spec
@@ -448,11 +610,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
448 quirk_amd_nl_class); 610 quirk_amd_nl_class);
449 611
450/* 612/*
451 * Let's make the southbridge information explicit instead 613 * Let's make the southbridge information explicit instead of having to
452 * of having to worry about people probing the ACPI areas, 614 * worry about people probing the ACPI areas, for example.. (Yes, it
453 * for example.. (Yes, it happens, and if you read the wrong 615 * happens, and if you read the wrong ACPI register it will put the machine
454 * ACPI register it will put the machine to sleep with no 616 * to sleep with no way of waking it up again. Bummer).
455 * way of waking it up again. Bummer).
456 * 617 *
457 * ALI M7101: Two IO regions pointed to by words at 618 * ALI M7101: Two IO regions pointed to by words at
458 * 0xE0 (64 bytes of ACPI registers) 619 * 0xE0 (64 bytes of ACPI registers)
@@ -508,6 +669,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
508 break; 669 break;
509 size = bit; 670 size = bit;
510 } 671 }
672
511 /* 673 /*
512 * For now we only print it out. Eventually we'll want to 674 * For now we only print it out. Eventually we'll want to
513 * reserve it, but let's get enough confirmation reports first. 675 * reserve it, but let's get enough confirmation reports first.
@@ -579,8 +741,7 @@ static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
579 * priority and can't tell whether the legacy device or the one created 741 * priority and can't tell whether the legacy device or the one created
580 * here is really at that address. This happens on boards with broken 742 * here is really at that address. This happens on boards with broken
581 * BIOSes. 743 * BIOSes.
582 */ 744 */
583
584 pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); 745 pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
585 if (enable & ICH4_ACPI_EN) 746 if (enable & ICH4_ACPI_EN)
586 quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES, 747 quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
@@ -617,7 +778,8 @@ static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
617 "ICH6 GPIO"); 778 "ICH6 GPIO");
618} 779}
619 780
620static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) 781static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
782 const char *name, int dynsize)
621{ 783{
622 u32 val; 784 u32 val;
623 u32 size, base; 785 u32 size, base;
@@ -641,7 +803,10 @@ static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
641 } 803 }
642 base &= ~(size-1); 804 base &= ~(size-1);
643 805
644 /* Just print it out for now. We should reserve it after more debugging */ 806 /*
807 * Just print it out for now. We should reserve it after more
808 * debugging.
809 */
645 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); 810 pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
646} 811}
647 812
@@ -657,7 +822,8 @@ static void quirk_ich6_lpc(struct pci_dev *dev)
657DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); 822DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
658DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); 823DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
659 824
660static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name) 825static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
826 const char *name)
661{ 827{
662 u32 val; 828 u32 val;
663 u32 mask, base; 829 u32 mask, base;
@@ -668,15 +834,15 @@ static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
668 if (!(val & 1)) 834 if (!(val & 1))
669 return; 835 return;
670 836
671 /* 837 /* IO base in bits 15:2, mask in bits 23:18, both are dword-based */
672 * IO base in bits 15:2, mask in bits 23:18, both
673 * are dword-based
674 */
675 base = val & 0xfffc; 838 base = val & 0xfffc;
676 mask = (val >> 16) & 0xfc; 839 mask = (val >> 16) & 0xfc;
677 mask |= 3; 840 mask |= 3;
678 841
679 /* Just print it out for now. We should reserve it after more debugging */ 842 /*
843 * Just print it out for now. We should reserve it after more
844 * debugging.
845 */
680 pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); 846 pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
681} 847}
682 848
@@ -748,8 +914,8 @@ static void quirk_vt8235_acpi(struct pci_dev *dev)
748DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); 914DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
749 915
750/* 916/*
751 * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back: 917 * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast
752 * Disable fast back-to-back on the secondary bus segment 918 * back-to-back: Disable fast back-to-back on the secondary bus segment
753 */ 919 */
754static void quirk_xio2000a(struct pci_dev *dev) 920static void quirk_xio2000a(struct pci_dev *dev)
755{ 921{
@@ -774,8 +940,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
774 * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip 940 * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
775 * devices to the external APIC. 941 * devices to the external APIC.
776 * 942 *
777 * TODO: When we have device-specific interrupt routers, 943 * TODO: When we have device-specific interrupt routers, this code will go
778 * this code will go away from quirks. 944 * away from quirks.
779 */ 945 */
780static void quirk_via_ioapic(struct pci_dev *dev) 946static void quirk_via_ioapic(struct pci_dev *dev)
781{ 947{
@@ -816,13 +982,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt
816DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); 982DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
817 983
818/* 984/*
819 * The AMD io apic can hang the box when an apic irq is masked. 985 * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
820 * We check all revs >= B0 (yet not in the pre production!) as the bug 986 * We check all revs >= B0 (yet not in the pre production!) as the bug
821 * is currently marked NoFix 987 * is currently marked NoFix
822 * 988 *
823 * We have multiple reports of hangs with this chipset that went away with 989 * We have multiple reports of hangs with this chipset that went away with
824 * noapic specified. For the moment we assume it's the erratum. We may be wrong 990 * noapic specified. For the moment we assume it's the erratum. We may be wrong
825 * of course. However the advice is demonstrably good even if so.. 991 * of course. However the advice is demonstrably good even if so.
826 */ 992 */
827static void quirk_amd_ioapic(struct pci_dev *dev) 993static void quirk_amd_ioapic(struct pci_dev *dev)
828{ 994{
@@ -838,7 +1004,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_a
838 1004
839static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev) 1005static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
840{ 1006{
841 /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */ 1007 /* Fix for improper SR-IOV configuration on Cavium cn88xx RNM device */
842 if (dev->subsystem_device == 0xa118) 1008 if (dev->subsystem_device == 0xa118)
843 dev->sriov->link = dev->devfn; 1009 dev->sriov->link = dev->devfn;
844} 1010}
@@ -860,19 +1026,17 @@ static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
860DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc); 1026DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
861 1027
862/* 1028/*
863 * FIXME: it is questionable that quirk_via_acpi 1029 * FIXME: it is questionable that quirk_via_acpi() is needed. It shows up
864 * is needed. It shows up as an ISA bridge, and does not 1030 * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register
865 * support the PCI_INTERRUPT_LINE register at all. Therefore 1031 * at all. Therefore it seems like setting the pci_dev's IRQ to the value
866 * it seems like setting the pci_dev's 'irq' to the 1032 * of the ACPI SCI interrupt is only done for convenience.
867 * value of the ACPI SCI interrupt is only done for convenience.
868 * -jgarzik 1033 * -jgarzik
869 */ 1034 */
870static void quirk_via_acpi(struct pci_dev *d) 1035static void quirk_via_acpi(struct pci_dev *d)
871{ 1036{
872 /*
873 * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
874 */
875 u8 irq; 1037 u8 irq;
1038
1039 /* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
876 pci_read_config_byte(d, 0x42, &irq); 1040 pci_read_config_byte(d, 0x42, &irq);
877 irq &= 0xf; 1041 irq &= 0xf;
878 if (irq && (irq != 2)) 1042 if (irq && (irq != 2))
@@ -881,11 +1045,7 @@ static void quirk_via_acpi(struct pci_dev *d)
881DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi); 1045DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
882DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi); 1046DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
883 1047
884 1048/* VIA bridges which have VLink */
885/*
886 * VIA bridges which have VLink
887 */
888
889static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18; 1049static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
890 1050
891static void quirk_via_bridge(struct pci_dev *dev) 1051static void quirk_via_bridge(struct pci_dev *dev)
@@ -893,9 +1053,11 @@ static void quirk_via_bridge(struct pci_dev *dev)
893 /* See what bridge we have and find the device ranges */ 1053 /* See what bridge we have and find the device ranges */
894 switch (dev->device) { 1054 switch (dev->device) {
895 case PCI_DEVICE_ID_VIA_82C686: 1055 case PCI_DEVICE_ID_VIA_82C686:
896 /* The VT82C686 is special, it attaches to PCI and can have 1056 /*
897 any device number. All its subdevices are functions of 1057 * The VT82C686 is special; it attaches to PCI and can have
898 that single device. */ 1058 * any device number. All its subdevices are functions of
1059 * that single device.
1060 */
899 via_vlink_dev_lo = PCI_SLOT(dev->devfn); 1061 via_vlink_dev_lo = PCI_SLOT(dev->devfn);
900 via_vlink_dev_hi = PCI_SLOT(dev->devfn); 1062 via_vlink_dev_hi = PCI_SLOT(dev->devfn);
901 break; 1063 break;
@@ -923,19 +1085,17 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_b
923DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge); 1085DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
924DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge); 1086DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
925 1087
926/** 1088/*
927 * quirk_via_vlink - VIA VLink IRQ number update 1089 * quirk_via_vlink - VIA VLink IRQ number update
928 * @dev: PCI device 1090 * @dev: PCI device
929 * 1091 *
930 * If the device we are dealing with is on a PIC IRQ we need to 1092 * If the device we are dealing with is on a PIC IRQ we need to ensure that
931 * ensure that the IRQ line register which usually is not relevant 1093 * the IRQ line register which usually is not relevant for PCI cards, is
932 * for PCI cards, is actually written so that interrupts get sent 1094 * actually written so that interrupts get sent to the right place.
933 * to the right place. 1095 *
934 * We only do this on systems where a VIA south bridge was detected, 1096 * We only do this on systems where a VIA south bridge was detected, and
935 * and only for VIA devices on the motherboard (see quirk_via_bridge 1097 * only for VIA devices on the motherboard (see quirk_via_bridge above).
936 * above).
937 */ 1098 */
938
939static void quirk_via_vlink(struct pci_dev *dev) 1099static void quirk_via_vlink(struct pci_dev *dev)
940{ 1100{
941 u8 irq, new_irq; 1101 u8 irq, new_irq;
@@ -955,9 +1115,10 @@ static void quirk_via_vlink(struct pci_dev *dev)
955 PCI_SLOT(dev->devfn) < via_vlink_dev_lo) 1115 PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
956 return; 1116 return;
957 1117
958 /* This is an internal VLink device on a PIC interrupt. The BIOS 1118 /*
959 ought to have set this but may not have, so we redo it */ 1119 * This is an internal VLink device on a PIC interrupt. The BIOS
960 1120 * ought to have set this but may not have, so we redo it.
1121 */
961 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); 1122 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
962 if (new_irq != irq) { 1123 if (new_irq != irq) {
963 pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n", 1124 pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
@@ -969,10 +1130,9 @@ static void quirk_via_vlink(struct pci_dev *dev)
969DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink); 1130DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
970 1131
971/* 1132/*
972 * VIA VT82C598 has its device ID settable and many BIOSes 1133 * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID
973 * set it to the ID of VT82C597 for backward compatibility. 1134 * of VT82C597 for backward compatibility. We need to switch it off to be
974 * We need to switch it off to be able to recognize the real 1135 * able to recognize the real type of the chip.
975 * type of the chip.
976 */ 1136 */
977static void quirk_vt82c598_id(struct pci_dev *dev) 1137static void quirk_vt82c598_id(struct pci_dev *dev)
978{ 1138{
@@ -982,10 +1142,10 @@ static void quirk_vt82c598_id(struct pci_dev *dev)
982DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id); 1142DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
983 1143
984/* 1144/*
985 * CardBus controllers have a legacy base address that enables them 1145 * CardBus controllers have a legacy base address that enables them to
986 * to respond as i82365 pcmcia controllers. We don't want them to 1146 * respond as i82365 pcmcia controllers. We don't want them to do this
987 * do this even if the Linux CardBus driver is not loaded, because 1147 * even if the Linux CardBus driver is not loaded, because the Linux i82365
988 * the Linux i82365 driver does not (and should not) handle CardBus. 1148 * driver does not (and should not) handle CardBus.
989 */ 1149 */
990static void quirk_cardbus_legacy(struct pci_dev *dev) 1150static void quirk_cardbus_legacy(struct pci_dev *dev)
991{ 1151{
@@ -997,11 +1157,11 @@ DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
997 PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy); 1157 PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
998 1158
999/* 1159/*
1000 * Following the PCI ordering rules is optional on the AMD762. I'm not 1160 * Following the PCI ordering rules is optional on the AMD762. I'm not sure
1001 * sure what the designers were smoking but let's not inhale... 1161 * what the designers were smoking but let's not inhale...
1002 * 1162 *
1003 * To be fair to AMD, it follows the spec by default, its BIOS people 1163 * To be fair to AMD, it follows the spec by default, it's BIOS people who
1004 * who turn it off! 1164 * turn it off!
1005 */ 1165 */
1006static void quirk_amd_ordering(struct pci_dev *dev) 1166static void quirk_amd_ordering(struct pci_dev *dev)
1007{ 1167{
@@ -1020,11 +1180,11 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk
1020DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); 1180DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1021 1181
1022/* 1182/*
1023 * DreamWorks provided workaround for Dunord I-3000 problem 1183 * DreamWorks-provided workaround for Dunord I-3000 problem
1024 * 1184 *
1025 * This card decodes and responds to addresses not apparently 1185 * This card decodes and responds to addresses not apparently assigned to
1026 * assigned to it. We force a larger allocation to ensure that 1186 * it. We force a larger allocation to ensure that nothing gets put too
1027 * nothing gets put too close to it. 1187 * close to it.
1028 */ 1188 */
1029static void quirk_dunord(struct pci_dev *dev) 1189static void quirk_dunord(struct pci_dev *dev)
1030{ 1190{
@@ -1037,10 +1197,9 @@ static void quirk_dunord(struct pci_dev *dev)
1037DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord); 1197DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1038 1198
1039/* 1199/*
1040 * i82380FB mobile docking controller: its PCI-to-PCI bridge 1200 * i82380FB mobile docking controller: its PCI-to-PCI bridge is subtractive
1041 * is subtractive decoding (transparent), and does indicate this 1201 * decoding (transparent), and does indicate this in the ProgIf.
1042 * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80 1202 * Unfortunately, the ProgIf value is wrong - 0x80 instead of 0x01.
1043 * instead of 0x01.
1044 */ 1203 */
1045static void quirk_transparent_bridge(struct pci_dev *dev) 1204static void quirk_transparent_bridge(struct pci_dev *dev)
1046{ 1205{
@@ -1050,10 +1209,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk
1050DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge); 1209DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1051 1210
1052/* 1211/*
1053 * Common misconfiguration of the MediaGX/Geode PCI master that will 1212 * Common misconfiguration of the MediaGX/Geode PCI master that will reduce
1054 * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 1213 * PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 datasheets
1055 * datasheets found at http://www.national.com/analog for info on what 1214 * found at http://www.national.com/analog for info on what these bits do.
1056 * these bits do. <christer@weinigel.se> 1215 * <christer@weinigel.se>
1057 */ 1216 */
1058static void quirk_mediagx_master(struct pci_dev *dev) 1217static void quirk_mediagx_master(struct pci_dev *dev)
1059{ 1218{
@@ -1071,9 +1230,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qui
1071DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); 1230DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1072 1231
1073/* 1232/*
1074 * Ensure C0 rev restreaming is off. This is normally done by 1233 * Ensure C0 rev restreaming is off. This is normally done by the BIOS but
1075 * the BIOS but in the odd case it is not the results are corruption 1234 * in the odd case it is not the results are corruption hence the presence
1076 * hence the presence of a Linux check 1235 * of a Linux check.
1077 */ 1236 */
1078static void quirk_disable_pxb(struct pci_dev *pdev) 1237static void quirk_disable_pxb(struct pci_dev *pdev)
1079{ 1238{
@@ -1117,9 +1276,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA
1117DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); 1276DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1118DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); 1277DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1119 1278
1120/* 1279/* Serverworks CSB5 IDE does not fully support native mode */
1121 * Serverworks CSB5 IDE does not fully support native mode
1122 */
1123static void quirk_svwks_csb5ide(struct pci_dev *pdev) 1280static void quirk_svwks_csb5ide(struct pci_dev *pdev)
1124{ 1281{
1125 u8 prog; 1282 u8 prog;
@@ -1133,9 +1290,7 @@ static void quirk_svwks_csb5ide(struct pci_dev *pdev)
1133} 1290}
1134DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide); 1291DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1135 1292
1136/* 1293/* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */
1137 * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
1138 */
1139static void quirk_ide_samemode(struct pci_dev *pdev) 1294static void quirk_ide_samemode(struct pci_dev *pdev)
1140{ 1295{
1141 u8 prog; 1296 u8 prog;
@@ -1151,10 +1306,7 @@ static void quirk_ide_samemode(struct pci_dev *pdev)
1151} 1306}
1152DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); 1307DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1153 1308
1154/* 1309/* Some ATA devices break if put into D3 */
1155 * Some ATA devices break if put into D3
1156 */
1157
1158static void quirk_no_ata_d3(struct pci_dev *pdev) 1310static void quirk_no_ata_d3(struct pci_dev *pdev)
1159{ 1311{
1160 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; 1312 pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
@@ -1172,7 +1324,8 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
1172DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, 1324DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
1173 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); 1325 PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1174 1326
1175/* This was originally an Alpha specific thing, but it really fits here. 1327/*
1328 * This was originally an Alpha-specific thing, but it really fits here.
1176 * The i82375 PCI/EISA bridge appears as non-classified. Fix that. 1329 * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
1177 */ 1330 */
1178static void quirk_eisa_bridge(struct pci_dev *dev) 1331static void quirk_eisa_bridge(struct pci_dev *dev)
@@ -1181,7 +1334,6 @@ static void quirk_eisa_bridge(struct pci_dev *dev)
1181} 1334}
1182DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge); 1335DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1183 1336
1184
1185/* 1337/*
1186 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge 1338 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
1187 * is not activated. The myth is that Asus said that they do not want the 1339 * is not activated. The myth is that Asus said that they do not want the
@@ -1398,15 +1550,19 @@ static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1398 1550
1399 if (likely(!asus_hides_smbus || !asus_rcba_base)) 1551 if (likely(!asus_hides_smbus || !asus_rcba_base))
1400 return; 1552 return;
1553
1401 /* read the Function Disable register, dword mode only */ 1554 /* read the Function Disable register, dword mode only */
1402 val = readl(asus_rcba_base + 0x3418); 1555 val = readl(asus_rcba_base + 0x3418);
1403 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */ 1556
1557 /* enable the SMBus device */
1558 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
1404} 1559}
1405 1560
1406static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev) 1561static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1407{ 1562{
1408 if (likely(!asus_hides_smbus || !asus_rcba_base)) 1563 if (likely(!asus_hides_smbus || !asus_rcba_base))
1409 return; 1564 return;
1565
1410 iounmap(asus_rcba_base); 1566 iounmap(asus_rcba_base);
1411 asus_rcba_base = NULL; 1567 asus_rcba_base = NULL;
1412 pci_info(dev, "Enabled ICH6/i801 SMBus device\n"); 1568 pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
@@ -1423,9 +1579,7 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_
1423DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume); 1579DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
1424DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early); 1580DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1425 1581
1426/* 1582/* SiS 96x south bridge: BIOS typically hides SMBus device... */
1427 * SiS 96x south bridge: BIOS typically hides SMBus device...
1428 */
1429static void quirk_sis_96x_smbus(struct pci_dev *dev) 1583static void quirk_sis_96x_smbus(struct pci_dev *dev)
1430{ 1584{
1431 u8 val = 0; 1585 u8 val = 0;
@@ -1448,7 +1602,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_si
1448 * ... This is further complicated by the fact that some SiS96x south 1602 * ... This is further complicated by the fact that some SiS96x south
1449 * bridges pretend to be 85C503/5513 instead. In that case see if we 1603 * bridges pretend to be 85C503/5513 instead. In that case see if we
1450 * spotted a compatible north bridge to make sure. 1604 * spotted a compatible north bridge to make sure.
1451 * (pci_find_device doesn't work yet) 1605 * (pci_find_device() doesn't work yet)
1452 * 1606 *
1453 * We can also enable the sis96x bit in the discovery register.. 1607 * We can also enable the sis96x bit in the discovery register..
1454 */ 1608 */
@@ -1468,9 +1622,9 @@ static void quirk_sis_503(struct pci_dev *dev)
1468 } 1622 }
1469 1623
1470 /* 1624 /*
1471 * Ok, it now shows up as a 96x.. run the 96x quirk by 1625 * Ok, it now shows up as a 96x. Run the 96x quirk by hand in case
1472 * hand in case it has already been processed. 1626 * it has already been processed. (Depends on link order, which is
1473 * (depends on link order, which is apparently not guaranteed) 1627 * apparently not guaranteed)
1474 */ 1628 */
1475 dev->device = devid; 1629 dev->device = devid;
1476 quirk_sis_96x_smbus(dev); 1630 quirk_sis_96x_smbus(dev);
@@ -1478,7 +1632,6 @@ static void quirk_sis_503(struct pci_dev *dev)
1478DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); 1632DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1479DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); 1633DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1480 1634
1481
1482/* 1635/*
1483 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller 1636 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
1484 * and MC97 modem controller are disabled when a second PCI soundcard is 1637 * and MC97 modem controller are disabled when a second PCI soundcard is
@@ -1515,9 +1668,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_h
1515#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) 1668#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1516 1669
1517/* 1670/*
1518 * If we are using libata we can drive this chip properly but must 1671 * If we are using libata we can drive this chip properly but must do this
1519 * do this early on to make the additional device appear during 1672 * early on to make the additional device appear during the PCI scanning.
1520 * the PCI scanning.
1521 */ 1673 */
1522static void quirk_jmicron_ata(struct pci_dev *pdev) 1674static void quirk_jmicron_ata(struct pci_dev *pdev)
1523{ 1675{
@@ -1613,14 +1765,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
1613 if ((pdev->class >> 8) != 0xff00) 1765 if ((pdev->class >> 8) != 0xff00)
1614 return; 1766 return;
1615 1767
1616 /* the first BAR is the location of the IO APIC...we must 1768 /*
1769 * The first BAR is the location of the IO-APIC... we must
1617 * not touch this (and it's already covered by the fixmap), so 1770 * not touch this (and it's already covered by the fixmap), so
1618 * forcibly insert it into the resource tree */ 1771 * forcibly insert it into the resource tree.
1772 */
1619 if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0)) 1773 if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
1620 insert_resource(&iomem_resource, &pdev->resource[0]); 1774 insert_resource(&iomem_resource, &pdev->resource[0]);
1621 1775
1622 /* The next five BARs all seem to be rubbish, so just clean 1776 /*
1623 * them out */ 1777 * The next five BARs all seem to be rubbish, so just clean
1778 * them out.
1779 */
1624 for (i = 1; i < 6; i++) 1780 for (i = 1; i < 6; i++)
1625 memset(&pdev->resource[i], 0, sizeof(pdev->resource[i])); 1781 memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
1626} 1782}
@@ -1638,8 +1794,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
1638DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); 1794DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1639 1795
1640/* 1796/*
1641 * It's possible for the MSI to get corrupted if shpc and acpi 1797 * It's possible for the MSI to get corrupted if SHPC and ACPI are used
1642 * are used together on certain PXH-based systems. 1798 * together on certain PXH-based systems.
1643 */ 1799 */
1644static void quirk_pcie_pxh(struct pci_dev *dev) 1800static void quirk_pcie_pxh(struct pci_dev *dev)
1645{ 1801{
@@ -1653,15 +1809,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pc
1653DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); 1809DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1654 1810
1655/* 1811/*
1656 * Some Intel PCI Express chipsets have trouble with downstream 1812 * Some Intel PCI Express chipsets have trouble with downstream device
1657 * device power management. 1813 * power management.
1658 */ 1814 */
1659static void quirk_intel_pcie_pm(struct pci_dev *dev) 1815static void quirk_intel_pcie_pm(struct pci_dev *dev)
1660{ 1816{
1661 pci_pm_d3_delay = 120; 1817 pci_pm_d3_delay = 120;
1662 dev->no_d1d2 = 1; 1818 dev->no_d1d2 = 1;
1663} 1819}
1664
1665DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); 1820DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
1666DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm); 1821DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
1667DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm); 1822DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
@@ -1723,7 +1878,7 @@ static const struct dmi_system_id boot_interrupt_dmi_table[] = {
1723 1878
1724/* 1879/*
1725 * Boot interrupts on some chipsets cannot be turned off. For these chipsets, 1880 * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
1726 * remap the original interrupt in the linux kernel to the boot interrupt, so 1881 * remap the original interrupt in the Linux kernel to the boot interrupt, so
1727 * that a PCI device's interrupt handler is installed on the boot interrupt 1882 * that a PCI device's interrupt handler is installed on the boot interrupt
1728 * line instead. 1883 * line instead.
1729 */ 1884 */
@@ -1760,7 +1915,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
1760 */ 1915 */
1761 1916
1762/* 1917/*
1763 * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no 1918 * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
1764 * 300641-004US, section 5.7.3. 1919 * 300641-004US, section 5.7.3.
1765 */ 1920 */
1766#define INTEL_6300_IOAPIC_ABAR 0x40 1921#define INTEL_6300_IOAPIC_ABAR 0x40
@@ -1783,9 +1938,7 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1783DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1938DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1784DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); 1939DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
1785 1940
1786/* 1941/* Disable boot interrupts on HT-1000 */
1787 * disable boot interrupts on HT-1000
1788 */
1789#define BC_HT1000_FEATURE_REG 0x64 1942#define BC_HT1000_FEATURE_REG 0x64
1790#define BC_HT1000_PIC_REGS_ENABLE (1<<0) 1943#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
1791#define BC_HT1000_MAP_IDX 0xC00 1944#define BC_HT1000_MAP_IDX 0xC00
@@ -1816,9 +1969,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
1816DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1969DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1817DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); 1970DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
1818 1971
1819/* 1972/* Disable boot interrupts on AMD and ATI chipsets */
1820 * disable boot interrupts on AMD and ATI chipsets 1973
1821 */
1822/* 1974/*
1823 * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 1975 * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
1824 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode 1976 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
@@ -1894,7 +2046,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
1894 quirk_tc86c001_ide); 2046 quirk_tc86c001_ide);
1895 2047
1896/* 2048/*
1897 * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the 2049 * PLX PCI 9050 PCI Target bridge controller has an erratum that prevents the
1898 * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o) 2050 * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o)
1899 * being read correctly if bit 7 of the base address is set. 2051 * being read correctly if bit 7 of the base address is set.
1900 * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128). 2052 * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
@@ -2087,15 +2239,17 @@ static void quirk_p64h2_1k_io(struct pci_dev *dev)
2087 dev->io_window_1k = 1; 2239 dev->io_window_1k = 1;
2088 } 2240 }
2089} 2241}
2090DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); 2242DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2091 2243
2092/* Under some circumstances, AER is not linked with extended capabilities. 2244/*
2245 * Under some circumstances, AER is not linked with extended capabilities.
2093 * Force it to be linked by setting the corresponding control bit in the 2246 * Force it to be linked by setting the corresponding control bit in the
2094 * config space. 2247 * config space.
2095 */ 2248 */
2096static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) 2249static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
2097{ 2250{
2098 uint8_t b; 2251 uint8_t b;
2252
2099 if (pci_read_config_byte(dev, 0xf41, &b) == 0) { 2253 if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
2100 if (!(b & 0x20)) { 2254 if (!(b & 0x20)) {
2101 pci_write_config_byte(dev, 0xf41, b | 0x20); 2255 pci_write_config_byte(dev, 0xf41, b | 0x20);
@@ -2125,8 +2279,10 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
2125 PCI_DEVICE_ID_VIA_8235_USB_2, NULL); 2279 PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
2126 uint8_t b; 2280 uint8_t b;
2127 2281
2128 /* p should contain the first (internal) VT6212L -- see if we have 2282 /*
2129 an external one by searching again */ 2283 * p should contain the first (internal) VT6212L -- see if we have
2284 * an external one by searching again.
2285 */
2130 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p); 2286 p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
2131 if (!p) 2287 if (!p)
2132 return; 2288 return;
@@ -2171,7 +2327,6 @@ static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2171 pcie_set_readrq(dev, 2048); 2327 pcie_set_readrq(dev, 2048);
2172 } 2328 }
2173} 2329}
2174
2175DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM, 2330DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2176 PCI_DEVICE_ID_TIGON3_5719, 2331 PCI_DEVICE_ID_TIGON3_5719,
2177 quirk_brcm_5719_limit_mrrs); 2332 quirk_brcm_5719_limit_mrrs);
@@ -2179,14 +2334,16 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2179#ifdef CONFIG_PCIE_IPROC_PLATFORM 2334#ifdef CONFIG_PCIE_IPROC_PLATFORM
2180static void quirk_paxc_bridge(struct pci_dev *pdev) 2335static void quirk_paxc_bridge(struct pci_dev *pdev)
2181{ 2336{
2182 /* The PCI config space is shared with the PAXC root port and the first 2337 /*
2338 * The PCI config space is shared with the PAXC root port and the first
2183 * Ethernet device. So, we need to workaround this by telling the PCI 2339 * Ethernet device. So, we need to workaround this by telling the PCI
2184 * code that the bridge is not an Ethernet device. 2340 * code that the bridge is not an Ethernet device.
2185 */ 2341 */
2186 if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) 2342 if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2187 pdev->class = PCI_CLASS_BRIDGE_PCI << 8; 2343 pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
2188 2344
2189 /* MPSS is not being set properly (as it is currently 0). This is 2345 /*
2346 * MPSS is not being set properly (as it is currently 0). This is
2190 * because that area of the PCI config space is hard coded to zero, and 2347 * because that area of the PCI config space is hard coded to zero, and
2191 * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS) 2348 * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
2192 * so that the MPS can be set to the real max value. 2349 * so that the MPS can be set to the real max value.
@@ -2197,10 +2354,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
2197DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge); 2354DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
2198#endif 2355#endif
2199 2356
2200/* Originally in EDAC sources for i82875P: 2357/*
2201 * Intel tells BIOS developers to hide device 6 which 2358 * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
2202 * configures the overflow device access containing 2359 * hide device 6 which configures the overflow device access containing the
2203 * the DRBs - this is where we expose device 6. 2360 * DRBs - this is where we expose device 6.
2204 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm 2361 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
2205 */ 2362 */
2206static void quirk_unhide_mch_dev6(struct pci_dev *dev) 2363static void quirk_unhide_mch_dev6(struct pci_dev *dev)
@@ -2212,18 +2369,18 @@ static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2212 pci_write_config_byte(dev, 0xF4, reg | 0x02); 2369 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2213 } 2370 }
2214} 2371}
2215
2216DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, 2372DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2217 quirk_unhide_mch_dev6); 2373 quirk_unhide_mch_dev6);
2218DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, 2374DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2219 quirk_unhide_mch_dev6); 2375 quirk_unhide_mch_dev6);
2220 2376
2221#ifdef CONFIG_PCI_MSI 2377#ifdef CONFIG_PCI_MSI
2222/* Some chipsets do not support MSI. We cannot easily rely on setting 2378/*
2223 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually 2379 * Some chipsets do not support MSI. We cannot easily rely on setting
2224 * some other buses controlled by the chipset even if Linux is not 2380 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually some
2225 * aware of it. Instead of setting the flag on all buses in the 2381 * other buses controlled by the chipset even if Linux is not aware of it.
2226 * machine, simply disable MSI globally. 2382 * Instead of setting the flag on all buses in the machine, simply disable
2383 * MSI globally.
2227 */ 2384 */
2228static void quirk_disable_all_msi(struct pci_dev *dev) 2385static void quirk_disable_all_msi(struct pci_dev *dev)
2229{ 2386{
@@ -2271,8 +2428,10 @@ static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2271DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi); 2428DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2272DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi); 2429DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2273 2430
2274/* Go through the list of Hypertransport capabilities and 2431/*
2275 * return 1 if a HT MSI capability is found and enabled */ 2432 * Go through the list of HyperTransport capabilities and return 1 if a HT
2433 * MSI capability is found and enabled.
2434 */
2276static int msi_ht_cap_enabled(struct pci_dev *dev) 2435static int msi_ht_cap_enabled(struct pci_dev *dev)
2277{ 2436{
2278 int pos, ttl = PCI_FIND_CAP_TTL; 2437 int pos, ttl = PCI_FIND_CAP_TTL;
@@ -2295,7 +2454,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
2295 return 0; 2454 return 0;
2296} 2455}
2297 2456
2298/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */ 2457/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
2299static void quirk_msi_ht_cap(struct pci_dev *dev) 2458static void quirk_msi_ht_cap(struct pci_dev *dev)
2300{ 2459{
2301 if (dev->subordinate && !msi_ht_cap_enabled(dev)) { 2460 if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
@@ -2306,8 +2465,9 @@ static void quirk_msi_ht_cap(struct pci_dev *dev)
2306DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, 2465DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2307 quirk_msi_ht_cap); 2466 quirk_msi_ht_cap);
2308 2467
2309/* The nVidia CK804 chipset may have 2 HT MSI mappings. 2468/*
2310 * MSI are supported if the MSI capability set in any of these mappings. 2469 * The nVidia CK804 chipset may have 2 HT MSI mappings. MSI is supported
2470 * if the MSI capability is set in any of these mappings.
2311 */ 2471 */
2312static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) 2472static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2313{ 2473{
@@ -2316,8 +2476,9 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2316 if (!dev->subordinate) 2476 if (!dev->subordinate)
2317 return; 2477 return;
2318 2478
2319 /* check HT MSI cap on this chipset and the root one. 2479 /*
2320 * a single one having MSI is enough to be sure that MSI are supported. 2480 * Check HT MSI cap on this chipset and the root one. A single one
2481 * having MSI is enough to be sure that MSI is supported.
2321 */ 2482 */
2322 pdev = pci_get_slot(dev->bus, 0); 2483 pdev = pci_get_slot(dev->bus, 0);
2323 if (!pdev) 2484 if (!pdev)
@@ -2354,13 +2515,13 @@ static void ht_enable_msi_mapping(struct pci_dev *dev)
2354DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 2515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
2355 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, 2516 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
2356 ht_enable_msi_mapping); 2517 ht_enable_msi_mapping);
2357
2358DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, 2518DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
2359 ht_enable_msi_mapping); 2519 ht_enable_msi_mapping);
2360 2520
2361/* The P5N32-SLI motherboards from Asus have a problem with msi 2521/*
2362 * for the MCP55 NIC. It is not yet determined whether the msi problem 2522 * The P5N32-SLI motherboards from Asus have a problem with MSI
2363 * also affects other devices. As for now, turn off msi for this device. 2523 * for the MCP55 NIC. It is not yet determined whether the MSI problem
2524 * also affects other devices. As for now, turn off MSI for this device.
2364 */ 2525 */
2365static void nvenet_msi_disable(struct pci_dev *dev) 2526static void nvenet_msi_disable(struct pci_dev *dev)
2366{ 2527{
@@ -2397,16 +2558,14 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2397 pci_read_config_dword(dev, 0x74, &cfg); 2558 pci_read_config_dword(dev, 0x74, &cfg);
2398 2559
2399 if (cfg & ((1 << 2) | (1 << 15))) { 2560 if (cfg & ((1 << 2) | (1 << 15))) {
2400 printk(KERN_INFO "Rewriting irq routing register on MCP55\n"); 2561 printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n");
2401 cfg &= ~((1 << 2) | (1 << 15)); 2562 cfg &= ~((1 << 2) | (1 << 15));
2402 pci_write_config_dword(dev, 0x74, cfg); 2563 pci_write_config_dword(dev, 0x74, cfg);
2403 } 2564 }
2404} 2565}
2405
2406DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 2566DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2407 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0, 2567 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
2408 nvbridge_check_legacy_irq_routing); 2568 nvbridge_check_legacy_irq_routing);
2409
2410DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 2569DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2411 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4, 2570 PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
2412 nvbridge_check_legacy_irq_routing); 2571 nvbridge_check_legacy_irq_routing);
@@ -2416,7 +2575,7 @@ static int ht_check_msi_mapping(struct pci_dev *dev)
2416 int pos, ttl = PCI_FIND_CAP_TTL; 2575 int pos, ttl = PCI_FIND_CAP_TTL;
2417 int found = 0; 2576 int found = 0;
2418 2577
2419 /* check if there is HT MSI cap or enabled on this device */ 2578 /* Check if there is HT MSI cap or enabled on this device */
2420 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); 2579 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2421 while (pos && ttl--) { 2580 while (pos && ttl--) {
2422 u8 flags; 2581 u8 flags;
@@ -2452,7 +2611,7 @@ static int host_bridge_with_leaf(struct pci_dev *host_bridge)
2452 if (!dev) 2611 if (!dev)
2453 continue; 2612 continue;
2454 2613
2455 /* found next host bridge ?*/ 2614 /* found next host bridge? */
2456 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); 2615 pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2457 if (pos != 0) { 2616 if (pos != 0) {
2458 pci_dev_put(dev); 2617 pci_dev_put(dev);
@@ -2611,27 +2770,27 @@ static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2611{ 2770{
2612 return __nv_msi_ht_cap_quirk(dev, 1); 2771 return __nv_msi_ht_cap_quirk(dev, 1);
2613} 2772}
2773DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2774DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2614 2775
2615static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev) 2776static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2616{ 2777{
2617 return __nv_msi_ht_cap_quirk(dev, 0); 2778 return __nv_msi_ht_cap_quirk(dev, 0);
2618} 2779}
2619
2620DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); 2780DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2621DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); 2781DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2622 2782
2623DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2624DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2625
2626static void quirk_msi_intx_disable_bug(struct pci_dev *dev) 2783static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
2627{ 2784{
2628 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; 2785 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2629} 2786}
2787
2630static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev) 2788static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2631{ 2789{
2632 struct pci_dev *p; 2790 struct pci_dev *p;
2633 2791
2634 /* SB700 MSI issue will be fixed at HW level from revision A21, 2792 /*
2793 * SB700 MSI issue will be fixed at HW level from revision A21;
2635 * we need check PCI REVISION ID of SMBus controller to get SB700 2794 * we need check PCI REVISION ID of SMBus controller to get SB700
2636 * revision. 2795 * revision.
2637 */ 2796 */
@@ -2644,6 +2803,7 @@ static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2644 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; 2803 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2645 pci_dev_put(p); 2804 pci_dev_put(p);
2646} 2805}
2806
2647static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev) 2807static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2648{ 2808{
2649 /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */ 2809 /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
@@ -2713,55 +2873,56 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
2713 quirk_msi_intx_disable_qca_bug); 2873 quirk_msi_intx_disable_qca_bug);
2714#endif /* CONFIG_PCI_MSI */ 2874#endif /* CONFIG_PCI_MSI */
2715 2875
2716/* Allow manual resource allocation for PCI hotplug bridges 2876/*
2717 * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For 2877 * Allow manual resource allocation for PCI hotplug bridges via
2718 * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), 2878 * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some PCI-PCI
2719 * kernel fails to allocate resources when hotplug device is 2879 * hotplug bridges, like PLX 6254 (former HINT HB6), kernel fails to
2720 * inserted and PCI bus is rescanned. 2880 * allocate resources when hotplug device is inserted and PCI bus is
2881 * rescanned.
2721 */ 2882 */
2722static void quirk_hotplug_bridge(struct pci_dev *dev) 2883static void quirk_hotplug_bridge(struct pci_dev *dev)
2723{ 2884{
2724 dev->is_hotplug_bridge = 1; 2885 dev->is_hotplug_bridge = 1;
2725} 2886}
2726
2727DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge); 2887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
2728 2888
2729/* 2889/*
2730 * This is a quirk for the Ricoh MMC controller found as a part of 2890 * This is a quirk for the Ricoh MMC controller found as a part of some
2731 * some mulifunction chips. 2891 * multifunction chips.
2732 2892 *
2733 * This is very similar and based on the ricoh_mmc driver written by 2893 * This is very similar and based on the ricoh_mmc driver written by
2734 * Philip Langdale. Thank you for these magic sequences. 2894 * Philip Langdale. Thank you for these magic sequences.
2735 * 2895 *
2736 * These chips implement the four main memory card controllers (SD, MMC, MS, xD) 2896 * These chips implement the four main memory card controllers (SD, MMC,
2737 * and one or both of cardbus or firewire. 2897 * MS, xD) and one or both of CardBus or FireWire.
2738 * 2898 *
2739 * It happens that they implement SD and MMC 2899 * It happens that they implement SD and MMC support as separate
2740 * support as separate controllers (and PCI functions). The linux SDHCI 2900 * controllers (and PCI functions). The Linux SDHCI driver supports MMC
2741 * driver supports MMC cards but the chip detects MMC cards in hardware 2901 * cards but the chip detects MMC cards in hardware and directs them to the
2742 * and directs them to the MMC controller - so the SDHCI driver never sees 2902 * MMC controller - so the SDHCI driver never sees them.
2743 * them.
2744 * 2903 *
2745 * To get around this, we must disable the useless MMC controller. 2904 * To get around this, we must disable the useless MMC controller. At that
2746 * At that point, the SDHCI controller will start seeing them 2905 * point, the SDHCI controller will start seeing them. It seems to be the
2747 * It seems to be the case that the relevant PCI registers to deactivate the 2906 * case that the relevant PCI registers to deactivate the MMC controller
2748 * MMC controller live on PCI function 0, which might be the cardbus controller 2907 * live on PCI function 0, which might be the CardBus controller or the
2749 * or the firewire controller, depending on the particular chip in question 2908 * FireWire controller, depending on the particular chip in question
2750 * 2909 *
2751 * This has to be done early, because as soon as we disable the MMC controller 2910 * This has to be done early, because as soon as we disable the MMC controller
2752 * other pci functions shift up one level, e.g. function #2 becomes function 2911 * other PCI functions shift up one level, e.g. function #2 becomes function
2753 * #1, and this will confuse the pci core. 2912 * #1, and this will confuse the PCI core.
2754 */ 2913 */
2755
2756#ifdef CONFIG_MMC_RICOH_MMC 2914#ifdef CONFIG_MMC_RICOH_MMC
2757static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev) 2915static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
2758{ 2916{
2759 /* disable via cardbus interface */
2760 u8 write_enable; 2917 u8 write_enable;
2761 u8 write_target; 2918 u8 write_target;
2762 u8 disable; 2919 u8 disable;
2763 2920
2764 /* disable must be done via function #0 */ 2921 /*
2922 * Disable via CardBus interface
2923 *
2924 * This must be done via function #0
2925 */
2765 if (PCI_FUNC(dev->devfn)) 2926 if (PCI_FUNC(dev->devfn))
2766 return; 2927 return;
2767 2928
@@ -2777,7 +2938,7 @@ static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
2777 pci_write_config_byte(dev, 0x8E, write_enable); 2938 pci_write_config_byte(dev, 0x8E, write_enable);
2778 pci_write_config_byte(dev, 0x8D, write_target); 2939 pci_write_config_byte(dev, 0x8D, write_target);
2779 2940
2780 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n"); 2941 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
2781 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n"); 2942 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
2782} 2943}
2783DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); 2944DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
@@ -2785,17 +2946,20 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
2785 2946
2786static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) 2947static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2787{ 2948{
2788 /* disable via firewire interface */
2789 u8 write_enable; 2949 u8 write_enable;
2790 u8 disable; 2950 u8 disable;
2791 2951
2792 /* disable must be done via function #0 */ 2952 /*
2953 * Disable via FireWire interface
2954 *
2955 * This must be done via function #0
2956 */
2793 if (PCI_FUNC(dev->devfn)) 2957 if (PCI_FUNC(dev->devfn))
2794 return; 2958 return;
2795 /* 2959 /*
2796 * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize 2960 * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
2797 * certain types of SD/MMC cards. Lowering the SD base 2961 * certain types of SD/MMC cards. Lowering the SD base clock
2798 * clock frequency from 200Mhz to 50Mhz fixes this issue. 2962 * frequency from 200Mhz to 50Mhz fixes this issue.
2799 * 2963 *
2800 * 0x150 - SD2.0 mode enable for changing base clock 2964 * 0x150 - SD2.0 mode enable for changing base clock
2801 * frequency to 50Mhz 2965 * frequency to 50Mhz
@@ -2826,7 +2990,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2826 pci_write_config_byte(dev, 0xCB, disable | 0x02); 2990 pci_write_config_byte(dev, 0xCB, disable | 0x02);
2827 pci_write_config_byte(dev, 0xCA, write_enable); 2991 pci_write_config_byte(dev, 0xCA, write_enable);
2828 2992
2829 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); 2993 pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
2830 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n"); 2994 pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
2831 2995
2832} 2996}
@@ -2842,13 +3006,13 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823,
2842#define VTUNCERRMSK_REG 0x1ac 3006#define VTUNCERRMSK_REG 0x1ac
2843#define VTD_MSK_SPEC_ERRORS (1 << 31) 3007#define VTD_MSK_SPEC_ERRORS (1 << 31)
2844/* 3008/*
2845 * This is a quirk for masking vt-d spec defined errors to platform error 3009 * This is a quirk for masking VT-d spec-defined errors to platform error
2846 * handling logic. With out this, platforms using Intel 7500, 5500 chipsets 3010 * handling logic. Without this, platforms using Intel 7500, 5500 chipsets
2847 * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based 3011 * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
2848 * on the RAS config settings of the platform) when a vt-d fault happens. 3012 * on the RAS config settings of the platform) when a VT-d fault happens.
2849 * The resulting SMI caused the system to hang. 3013 * The resulting SMI caused the system to hang.
2850 * 3014 *
2851 * VT-d spec related errors are already handled by the VT-d OS code, so no 3015 * VT-d spec-related errors are already handled by the VT-d OS code, so no
2852 * need to report the same error through other channels. 3016 * need to report the same error through other channels.
2853 */ 3017 */
2854static void vtd_mask_spec_errors(struct pci_dev *dev) 3018static void vtd_mask_spec_errors(struct pci_dev *dev)
@@ -2874,7 +3038,8 @@ static void fixup_ti816x_class(struct pci_dev *dev)
2874DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800, 3038DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
2875 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class); 3039 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
2876 3040
2877/* Some PCIe devices do not work reliably with the claimed maximum 3041/*
3042 * Some PCIe devices do not work reliably with the claimed maximum
2878 * payload size supported. 3043 * payload size supported.
2879 */ 3044 */
2880static void fixup_mpss_256(struct pci_dev *dev) 3045static void fixup_mpss_256(struct pci_dev *dev)
@@ -2888,9 +3053,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
2888DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, 3053DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
2889 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); 3054 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
2890 3055
2891/* Intel 5000 and 5100 Memory controllers have an errata with read completion 3056/*
3057 * Intel 5000 and 5100 Memory controllers have an erratum with read completion
2892 * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. 3058 * coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
2893 * Since there is no way of knowing what the PCIE MPS on each fabric will be 3059 * Since there is no way of knowing what the PCIe MPS on each fabric will be
2894 * until all of the devices are discovered and buses walked, read completion 3060 * until all of the devices are discovered and buses walked, read completion
2895 * coalescing must be disabled. Unfortunately, it cannot be re-enabled because 3061 * coalescing must be disabled. Unfortunately, it cannot be re-enabled because
2896 * it is possible to hotplug a device with MPS of 256B. 3062 * it is possible to hotplug a device with MPS of 256B.
@@ -2904,9 +3070,10 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
2904 pcie_bus_config == PCIE_BUS_DEFAULT) 3070 pcie_bus_config == PCIE_BUS_DEFAULT)
2905 return; 3071 return;
2906 3072
2907 /* Intel errata specifies bits to change but does not say what they are. 3073 /*
2908 * Keeping them magical until such time as the registers and values can 3074 * Intel erratum specifies bits to change but does not say what
2909 * be explained. 3075 * they are. Keeping them magical until such time as the registers
3076 * and values can be explained.
2910 */ 3077 */
2911 err = pci_read_config_word(dev, 0x48, &rcc); 3078 err = pci_read_config_word(dev, 0x48, &rcc);
2912 if (err) { 3079 if (err) {
@@ -2925,7 +3092,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
2925 return; 3092 return;
2926 } 3093 }
2927 3094
2928 pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n"); 3095 pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
2929} 3096}
2930/* Intel 5000 series memory controllers and ports 2-7 */ 3097/* Intel 5000 series memory controllers and ports 2-7 */
2931DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata); 3098DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
@@ -2955,11 +3122,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
2955DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); 3122DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
2956DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); 3123DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
2957 3124
2958
2959/* 3125/*
2960 * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To 3126 * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
2961 * work around this, query the size it should be configured to by the device and 3127 * To work around this, query the size it should be configured to by the
2962 * modify the resource end to correspond to this new size. 3128 * device and modify the resource end to correspond to this new size.
2963 */ 3129 */
2964static void quirk_intel_ntb(struct pci_dev *dev) 3130static void quirk_intel_ntb(struct pci_dev *dev)
2965{ 3131{
@@ -2981,39 +3147,17 @@ static void quirk_intel_ntb(struct pci_dev *dev)
2981DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb); 3147DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
2982DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); 3148DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
2983 3149
2984static ktime_t fixup_debug_start(struct pci_dev *dev,
2985 void (*fn)(struct pci_dev *dev))
2986{
2987 if (initcall_debug)
2988 pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
2989
2990 return ktime_get();
2991}
2992
2993static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
2994 void (*fn)(struct pci_dev *dev))
2995{
2996 ktime_t delta, rettime;
2997 unsigned long long duration;
2998
2999 rettime = ktime_get();
3000 delta = ktime_sub(rettime, calltime);
3001 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
3002 if (initcall_debug || duration > 10000)
3003 pci_info(dev, "%pF took %lld usecs\n", fn, duration);
3004}
3005
3006/* 3150/*
3007 * Some BIOS implementations leave the Intel GPU interrupts enabled, 3151 * Some BIOS implementations leave the Intel GPU interrupts enabled, even
3008 * even though no one is handling them (f.e. i915 driver is never loaded). 3152 * though no one is handling them (e.g., if the i915 driver is never
3009 * Additionally the interrupt destination is not set up properly 3153 * loaded). Additionally the interrupt destination is not set up properly
3010 * and the interrupt ends up -somewhere-. 3154 * and the interrupt ends up -somewhere-.
3011 * 3155 *
3012 * These spurious interrupts are "sticky" and the kernel disables 3156 * These spurious interrupts are "sticky" and the kernel disables the
3013 * the (shared) interrupt line after 100.000+ generated interrupts. 3157 * (shared) interrupt line after 100,000+ generated interrupts.
3014 * 3158 *
3015 * Fix it by disabling the still enabled interrupts. 3159 * Fix it by disabling the still enabled interrupts. This resolves crashes
3016 * This resolves crashes often seen on monitor unplug. 3160 * often seen on monitor unplug.
3017 */ 3161 */
3018#define I915_DEIER_REG 0x4400c 3162#define I915_DEIER_REG 0x4400c
3019static void disable_igfx_irq(struct pci_dev *dev) 3163static void disable_igfx_irq(struct pci_dev *dev)
@@ -3101,38 +3245,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
3101 * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, 3245 * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
3102 * DisINTx can be set but the interrupt status bit is non-functional. 3246 * DisINTx can be set but the interrupt status bit is non-functional.
3103 */ 3247 */
3104DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, 3248DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
3105 quirk_broken_intx_masking); 3249DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
3106DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, 3250DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
3107 quirk_broken_intx_masking); 3251DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
3108DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, 3252DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
3109 quirk_broken_intx_masking); 3253DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
3110DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, 3254DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
3111 quirk_broken_intx_masking); 3255DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
3112DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, 3256DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
3113 quirk_broken_intx_masking); 3257DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
3114DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, 3258DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
3115 quirk_broken_intx_masking); 3259DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
3116DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, 3260DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
3117 quirk_broken_intx_masking); 3261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
3118DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, 3262DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
3119 quirk_broken_intx_masking); 3263DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3120DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
3121 quirk_broken_intx_masking);
3122DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
3123 quirk_broken_intx_masking);
3124DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
3125 quirk_broken_intx_masking);
3126DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
3127 quirk_broken_intx_masking);
3128DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
3129 quirk_broken_intx_masking);
3130DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
3131 quirk_broken_intx_masking);
3132DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
3133 quirk_broken_intx_masking);
3134DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
3135 quirk_broken_intx_masking);
3136 3264
3137static u16 mellanox_broken_intx_devs[] = { 3265static u16 mellanox_broken_intx_devs[] = {
3138 PCI_DEVICE_ID_MELLANOX_HERMON_SDR, 3266 PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
@@ -3177,7 +3305,8 @@ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
3177 } 3305 }
3178 } 3306 }
3179 3307
3180 /* Getting here means Connect-IB cards and up. Connect-IB has no INTx 3308 /*
3309 * Getting here means Connect-IB cards and up. Connect-IB has no INTx
3181 * support so shouldn't be checked further 3310 * support so shouldn't be checked further
3182 */ 3311 */
3183 if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB) 3312 if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
@@ -3297,8 +3426,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3297 * shutdown before suspend. Otherwise the native host interface (NHI) will not 3426 * shutdown before suspend. Otherwise the native host interface (NHI) will not
3298 * be present after resume if a device was plugged in before suspend. 3427 * be present after resume if a device was plugged in before suspend.
3299 * 3428 *
3300 * The thunderbolt controller consists of a pcie switch with downstream 3429 * The Thunderbolt controller consists of a PCIe switch with downstream
3301 * bridges leading to the NHI and to the tunnel pci bridges. 3430 * bridges leading to the NHI and to the tunnel PCI bridges.
3302 * 3431 *
3303 * This quirk cuts power to the whole chip. Therefore we have to apply it 3432 * This quirk cuts power to the whole chip. Therefore we have to apply it
3304 * during suspend_noirq of the upstream bridge. 3433 * during suspend_noirq of the upstream bridge.
@@ -3316,17 +3445,19 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
3316 bridge = ACPI_HANDLE(&dev->dev); 3445 bridge = ACPI_HANDLE(&dev->dev);
3317 if (!bridge) 3446 if (!bridge)
3318 return; 3447 return;
3448
3319 /* 3449 /*
3320 * SXIO and SXLV are present only on machines requiring this quirk. 3450 * SXIO and SXLV are present only on machines requiring this quirk.
3321 * TB bridges in external devices might have the same device id as those 3451 * Thunderbolt bridges in external devices might have the same
3322 * on the host, but they will not have the associated ACPI methods. This 3452 * device ID as those on the host, but they will not have the
3323 * implicitly checks that we are at the right bridge. 3453 * associated ACPI methods. This implicitly checks that we are at
3454 * the right bridge.
3324 */ 3455 */
3325 if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO)) 3456 if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
3326 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP)) 3457 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
3327 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV))) 3458 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
3328 return; 3459 return;
3329 pci_info(dev, "quirk: cutting power to thunderbolt controller...\n"); 3460 pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
3330 3461
3331 /* magic sequence */ 3462 /* magic sequence */
3332 acpi_execute_simple_method(SXIO, NULL, 1); 3463 acpi_execute_simple_method(SXIO, NULL, 1);
@@ -3341,9 +3472,9 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
3341 quirk_apple_poweroff_thunderbolt); 3472 quirk_apple_poweroff_thunderbolt);
3342 3473
3343/* 3474/*
3344 * Apple: Wait for the thunderbolt controller to reestablish pci tunnels. 3475 * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
3345 * 3476 *
3346 * During suspend the thunderbolt controller is reset and all pci 3477 * During suspend the Thunderbolt controller is reset and all PCI
3347 * tunnels are lost. The NHI driver will try to reestablish all tunnels 3478 * tunnels are lost. The NHI driver will try to reestablish all tunnels
3348 * during resume. We have to manually wait for the NHI since there is 3479 * during resume. We have to manually wait for the NHI since there is
3349 * no parent child relationship between the NHI and the tunneled 3480 * no parent child relationship between the NHI and the tunneled
@@ -3358,9 +3489,10 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
3358 return; 3489 return;
3359 if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM) 3490 if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
3360 return; 3491 return;
3492
3361 /* 3493 /*
3362 * Find the NHI and confirm that we are a bridge on the tb host 3494 * Find the NHI and confirm that we are a bridge on the Thunderbolt
3363 * controller and not on a tb endpoint. 3495 * host controller and not on a Thunderbolt endpoint.
3364 */ 3496 */
3365 sibling = pci_get_slot(dev->bus, 0x0); 3497 sibling = pci_get_slot(dev->bus, 0x0);
3366 if (sibling == dev) 3498 if (sibling == dev)
@@ -3377,7 +3509,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
3377 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) 3509 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
3378 || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8) 3510 || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
3379 goto out; 3511 goto out;
3380 pci_info(dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); 3512 pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
3381 device_pm_wait_for_dev(&dev->dev, &nhi->dev); 3513 device_pm_wait_for_dev(&dev->dev, &nhi->dev);
3382out: 3514out:
3383 pci_dev_put(nhi); 3515 pci_dev_put(nhi);
@@ -3397,142 +3529,6 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
3397 quirk_apple_wait_for_thunderbolt); 3529 quirk_apple_wait_for_thunderbolt);
3398#endif 3530#endif
3399 3531
3400static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
3401 struct pci_fixup *end)
3402{
3403 ktime_t calltime;
3404
3405 for (; f < end; f++)
3406 if ((f->class == (u32) (dev->class >> f->class_shift) ||
3407 f->class == (u32) PCI_ANY_ID) &&
3408 (f->vendor == dev->vendor ||
3409 f->vendor == (u16) PCI_ANY_ID) &&
3410 (f->device == dev->device ||
3411 f->device == (u16) PCI_ANY_ID)) {
3412 calltime = fixup_debug_start(dev, f->hook);
3413 f->hook(dev);
3414 fixup_debug_report(dev, calltime, f->hook);
3415 }
3416}
3417
3418extern struct pci_fixup __start_pci_fixups_early[];
3419extern struct pci_fixup __end_pci_fixups_early[];
3420extern struct pci_fixup __start_pci_fixups_header[];
3421extern struct pci_fixup __end_pci_fixups_header[];
3422extern struct pci_fixup __start_pci_fixups_final[];
3423extern struct pci_fixup __end_pci_fixups_final[];
3424extern struct pci_fixup __start_pci_fixups_enable[];
3425extern struct pci_fixup __end_pci_fixups_enable[];
3426extern struct pci_fixup __start_pci_fixups_resume[];
3427extern struct pci_fixup __end_pci_fixups_resume[];
3428extern struct pci_fixup __start_pci_fixups_resume_early[];
3429extern struct pci_fixup __end_pci_fixups_resume_early[];
3430extern struct pci_fixup __start_pci_fixups_suspend[];
3431extern struct pci_fixup __end_pci_fixups_suspend[];
3432extern struct pci_fixup __start_pci_fixups_suspend_late[];
3433extern struct pci_fixup __end_pci_fixups_suspend_late[];
3434
3435static bool pci_apply_fixup_final_quirks;
3436
3437void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
3438{
3439 struct pci_fixup *start, *end;
3440
3441 switch (pass) {
3442 case pci_fixup_early:
3443 start = __start_pci_fixups_early;
3444 end = __end_pci_fixups_early;
3445 break;
3446
3447 case pci_fixup_header:
3448 start = __start_pci_fixups_header;
3449 end = __end_pci_fixups_header;
3450 break;
3451
3452 case pci_fixup_final:
3453 if (!pci_apply_fixup_final_quirks)
3454 return;
3455 start = __start_pci_fixups_final;
3456 end = __end_pci_fixups_final;
3457 break;
3458
3459 case pci_fixup_enable:
3460 start = __start_pci_fixups_enable;
3461 end = __end_pci_fixups_enable;
3462 break;
3463
3464 case pci_fixup_resume:
3465 start = __start_pci_fixups_resume;
3466 end = __end_pci_fixups_resume;
3467 break;
3468
3469 case pci_fixup_resume_early:
3470 start = __start_pci_fixups_resume_early;
3471 end = __end_pci_fixups_resume_early;
3472 break;
3473
3474 case pci_fixup_suspend:
3475 start = __start_pci_fixups_suspend;
3476 end = __end_pci_fixups_suspend;
3477 break;
3478
3479 case pci_fixup_suspend_late:
3480 start = __start_pci_fixups_suspend_late;
3481 end = __end_pci_fixups_suspend_late;
3482 break;
3483
3484 default:
3485 /* stupid compiler warning, you would think with an enum... */
3486 return;
3487 }
3488 pci_do_fixups(dev, start, end);
3489}
3490EXPORT_SYMBOL(pci_fixup_device);
3491
3492
3493static int __init pci_apply_final_quirks(void)
3494{
3495 struct pci_dev *dev = NULL;
3496 u8 cls = 0;
3497 u8 tmp;
3498
3499 if (pci_cache_line_size)
3500 printk(KERN_DEBUG "PCI: CLS %u bytes\n",
3501 pci_cache_line_size << 2);
3502
3503 pci_apply_fixup_final_quirks = true;
3504 for_each_pci_dev(dev) {
3505 pci_fixup_device(pci_fixup_final, dev);
3506 /*
3507 * If arch hasn't set it explicitly yet, use the CLS
3508 * value shared by all PCI devices. If there's a
3509 * mismatch, fall back to the default value.
3510 */
3511 if (!pci_cache_line_size) {
3512 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
3513 if (!cls)
3514 cls = tmp;
3515 if (!tmp || cls == tmp)
3516 continue;
3517
3518 printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
3519 cls << 2, tmp << 2,
3520 pci_dfl_cache_line_size << 2);
3521 pci_cache_line_size = pci_dfl_cache_line_size;
3522 }
3523 }
3524
3525 if (!pci_cache_line_size) {
3526 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
3527 cls << 2, pci_dfl_cache_line_size << 2);
3528 pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
3529 }
3530
3531 return 0;
3532}
3533
3534fs_initcall_sync(pci_apply_final_quirks);
3535
3536/* 3532/*
3537 * Following are device-specific reset methods which can be used to 3533 * Following are device-specific reset methods which can be used to
3538 * reset a single function if other methods (e.g. FLR, PM D0->D3) are 3534 * reset a single function if other methods (e.g. FLR, PM D0->D3) are
@@ -3602,9 +3598,7 @@ reset_complete:
3602 return 0; 3598 return 0;
3603} 3599}
3604 3600
3605/* 3601/* Device-specific reset method for Chelsio T4-based adapters */
3606 * Device-specific reset method for Chelsio T4-based adapters.
3607 */
3608static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe) 3602static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
3609{ 3603{
3610 u16 old_command; 3604 u16 old_command;
@@ -3887,7 +3881,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
3887/* 3881/*
3888 * Some devices have problems with Transaction Layer Packets with the Relaxed 3882 * Some devices have problems with Transaction Layer Packets with the Relaxed
3889 * Ordering Attribute set. Such devices should mark themselves and other 3883 * Ordering Attribute set. Such devices should mark themselves and other
3890 * Device Drivers should check before sending TLPs with RO set. 3884 * device drivers should check before sending TLPs with RO set.
3891 */ 3885 */
3892static void quirk_relaxedordering_disable(struct pci_dev *dev) 3886static void quirk_relaxedordering_disable(struct pci_dev *dev)
3893{ 3887{
@@ -3897,7 +3891,7 @@ static void quirk_relaxedordering_disable(struct pci_dev *dev)
3897 3891
3898/* 3892/*
3899 * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root 3893 * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
3900 * Complex has a Flow Control Credit issue which can cause performance 3894 * Complex have a Flow Control Credit issue which can cause performance
3901 * problems with Upstream Transaction Layer Packets with Relaxed Ordering set. 3895 * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
3902 */ 3896 */
3903DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8, 3897DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
@@ -3958,7 +3952,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED
3958 quirk_relaxedordering_disable); 3952 quirk_relaxedordering_disable);
3959 3953
3960/* 3954/*
3961 * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex 3955 * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex
3962 * where Upstream Transaction Layer Packets with the Relaxed Ordering 3956 * where Upstream Transaction Layer Packets with the Relaxed Ordering
3963 * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering 3957 * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
3964 * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules 3958 * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
@@ -4022,7 +4016,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
4022 * This mask/compare operation selects for Physical Function 4 on a 4016 * This mask/compare operation selects for Physical Function 4 on a
4023 * T5. We only need to fix up the Root Port once for any of the 4017 * T5. We only need to fix up the Root Port once for any of the
4024 * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely 4018 * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
4025 * 0x54xx so we use that one, 4019 * 0x54xx so we use that one.
4026 */ 4020 */
4027 if ((pdev->device & 0xff00) == 0x5400) 4021 if ((pdev->device & 0xff00) == 0x5400)
4028 quirk_disable_root_port_attributes(pdev); 4022 quirk_disable_root_port_attributes(pdev);
@@ -4113,7 +4107,7 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
4113static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) 4107static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
4114{ 4108{
4115 /* 4109 /*
4116 * X-Gene root matching this quirk do not allow peer-to-peer 4110 * X-Gene Root Ports matching this quirk do not allow peer-to-peer
4117 * transactions with others, allowing masking out these bits as if they 4111 * transactions with others, allowing masking out these bits as if they
4118 * were unimplemented in the ACS capability. 4112 * were unimplemented in the ACS capability.
4119 */ 4113 */
@@ -4230,11 +4224,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4230 * 0xa290-0xa29f PCI Express Root port #{0-16} 4224 * 0xa290-0xa29f PCI Express Root port #{0-16}
4231 * 0xa2e7-0xa2ee PCI Express Root port #{17-24} 4225 * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
4232 * 4226 *
4227 * Mobile chipsets are also affected, 7th & 8th Generation
4228 * Specification update confirms ACS errata 22, status no fix: (7th Generation
4229 * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
4230 * Processor Family I/O for U Quad Core Platforms Specification Update,
4231 * August 2017, Revision 002, Document#: 334660-002)[6]
4232 * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
4233 * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U
4234 * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
4235 *
4236 * 0x9d10-0x9d1b PCI Express Root port #{1-12}
4237 *
4238 * The 300 series chipset suffers from the same bug so include those root
4239 * ports here as well.
4240 *
4241 * 0xa32c-0xa343 PCI Express Root port #{0-24}
4242 *
4233 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html 4243 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
4234 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html 4244 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
4235 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html 4245 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
4236 * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html 4246 * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
4237 * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html 4247 * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
4248 * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
4249 * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
4238 */ 4250 */
4239static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) 4251static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4240{ 4252{
@@ -4244,6 +4256,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4244 switch (dev->device) { 4256 switch (dev->device) {
4245 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ 4257 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
4246 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ 4258 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
4259 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
4260 case 0xa32c ... 0xa343: /* 300 series */
4247 return true; 4261 return true;
4248 } 4262 }
4249 4263
@@ -4361,8 +4375,8 @@ static const struct pci_dev_acs_enabled {
4361 { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, 4375 { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
4362 { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, 4376 { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
4363 /* QCOM QDF2xxx root ports */ 4377 /* QCOM QDF2xxx root ports */
4364 { 0x17cb, 0x400, pci_quirk_qcom_rp_acs }, 4378 { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
4365 { 0x17cb, 0x401, pci_quirk_qcom_rp_acs }, 4379 { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
4366 /* Intel PCH root ports */ 4380 /* Intel PCH root ports */
4367 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, 4381 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
4368 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, 4382 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
@@ -4436,7 +4450,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
4436 /* 4450 /*
4437 * Read the RCBA register from the LPC (D31:F0). PCH root ports 4451 * Read the RCBA register from the LPC (D31:F0). PCH root ports
4438 * are D28:F* and therefore get probed before LPC, thus we can't 4452 * are D28:F* and therefore get probed before LPC, thus we can't
4439 * use pci_get_slot/pci_read_config_dword here. 4453 * use pci_get_slot()/pci_read_config_dword() here.
4440 */ 4454 */
4441 pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0), 4455 pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
4442 INTEL_LPC_RCBA_REG, &rcba); 4456 INTEL_LPC_RCBA_REG, &rcba);
@@ -4569,7 +4583,7 @@ int pci_dev_specific_enable_acs(struct pci_dev *dev)
4569} 4583}
4570 4584
4571/* 4585/*
4572 * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with 4586 * The PCI capabilities list for Intel DH895xCC VFs (device ID 0x0443) with
4573 * QuickAssist Technology (QAT) is prematurely terminated in hardware. The 4587 * QuickAssist Technology (QAT) is prematurely terminated in hardware. The
4574 * Next Capability pointer in the MSI Capability Structure should point to 4588 * Next Capability pointer in the MSI Capability Structure should point to
4575 * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating 4589 * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
@@ -4630,9 +4644,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
4630 if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP)) 4644 if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
4631 return; 4645 return;
4632 4646
4633 /* 4647 /* Save PCIe cap */
4634 * Save PCIE cap
4635 */
4636 state = kzalloc(sizeof(*state) + size, GFP_KERNEL); 4648 state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
4637 if (!state) 4649 if (!state)
4638 return; 4650 return;
@@ -4653,7 +4665,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
4653} 4665}
4654DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); 4666DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
4655 4667
4656/* FLR may cause some 82579 devices to hang. */ 4668/* FLR may cause some 82579 devices to hang */
4657static void quirk_intel_no_flr(struct pci_dev *dev) 4669static void quirk_intel_no_flr(struct pci_dev *dev)
4658{ 4670{
4659 dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; 4671 dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 072784f55ea5..79b1824e83b4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1943,56 +1943,56 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
1943 } 1943 }
1944 1944
1945 /* 1945 /*
1946 * There is only one bridge on the bus so it gets all available
1947 * resources which it can then distribute to the possible
1948 * hotplug bridges below.
1949 */
1950 if (hotplug_bridges + normal_bridges == 1) {
1951 dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
1952 if (dev->subordinate) {
1953 pci_bus_distribute_available_resources(dev->subordinate,
1954 add_list, available_io, available_mmio,
1955 available_mmio_pref);
1956 }
1957 return;
1958 }
1959
1960 /*
1946 * Go over devices on this bus and distribute the remaining 1961 * Go over devices on this bus and distribute the remaining
1947 * resource space between hotplug bridges. 1962 * resource space between hotplug bridges.
1948 */ 1963 */
1949 for_each_pci_bridge(dev, bus) { 1964 for_each_pci_bridge(dev, bus) {
1965 resource_size_t align, io, mmio, mmio_pref;
1950 struct pci_bus *b; 1966 struct pci_bus *b;
1951 1967
1952 b = dev->subordinate; 1968 b = dev->subordinate;
1953 if (!b) 1969 if (!b || !dev->is_hotplug_bridge)
1954 continue; 1970 continue;
1955 1971
1956 if (!hotplug_bridges && normal_bridges == 1) { 1972 /*
1957 /* 1973 * Distribute available extra resources equally between
1958 * There is only one bridge on the bus (upstream 1974 * hotplug-capable downstream ports taking alignment into
1959 * port) so it gets all available resources 1975 * account.
1960 * which it can then distribute to the possible 1976 *
1961 * hotplug bridges below. 1977 * Here hotplug_bridges is always != 0.
1962 */ 1978 */
1963 pci_bus_distribute_available_resources(b, add_list, 1979 align = pci_resource_alignment(bridge, io_res);
1964 available_io, available_mmio, 1980 io = div64_ul(available_io, hotplug_bridges);
1965 available_mmio_pref); 1981 io = min(ALIGN(io, align), remaining_io);
1966 } else if (dev->is_hotplug_bridge) { 1982 remaining_io -= io;
1967 resource_size_t align, io, mmio, mmio_pref; 1983
1968 1984 align = pci_resource_alignment(bridge, mmio_res);
1969 /* 1985 mmio = div64_ul(available_mmio, hotplug_bridges);
1970 * Distribute available extra resources equally 1986 mmio = min(ALIGN(mmio, align), remaining_mmio);
1971 * between hotplug-capable downstream ports 1987 remaining_mmio -= mmio;
1972 * taking alignment into account. 1988
1973 * 1989 align = pci_resource_alignment(bridge, mmio_pref_res);
1974 * Here hotplug_bridges is always != 0. 1990 mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
1975 */ 1991 mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
1976 align = pci_resource_alignment(bridge, io_res); 1992 remaining_mmio_pref -= mmio_pref;
1977 io = div64_ul(available_io, hotplug_bridges); 1993
1978 io = min(ALIGN(io, align), remaining_io); 1994 pci_bus_distribute_available_resources(b, add_list, io, mmio,
1979 remaining_io -= io; 1995 mmio_pref);
1980
1981 align = pci_resource_alignment(bridge, mmio_res);
1982 mmio = div64_ul(available_mmio, hotplug_bridges);
1983 mmio = min(ALIGN(mmio, align), remaining_mmio);
1984 remaining_mmio -= mmio;
1985
1986 align = pci_resource_alignment(bridge, mmio_pref_res);
1987 mmio_pref = div64_ul(available_mmio_pref,
1988 hotplug_bridges);
1989 mmio_pref = min(ALIGN(mmio_pref, align),
1990 remaining_mmio_pref);
1991 remaining_mmio_pref -= mmio_pref;
1992
1993 pci_bus_distribute_available_resources(b, add_list, io,
1994 mmio, mmio_pref);
1995 }
1996 } 1996 }
1997} 1997}
1998 1998
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fd0ea6af9e36..8758a2a9e6c1 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -506,7 +506,8 @@ extern bool osc_pc_lpi_support_confirmed;
506#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 506#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
507#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 507#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
508#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 508#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
509#define OSC_PCI_CONTROL_MASKS 0x0000001f 509#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
510#define OSC_PCI_CONTROL_MASKS 0x0000003f
510 511
511#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 512#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
512#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 513#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 8f87bbeceef4..514bffa11dbb 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -14,6 +14,7 @@
14#define AER_NONFATAL 0 14#define AER_NONFATAL 0
15#define AER_FATAL 1 15#define AER_FATAL 1
16#define AER_CORRECTABLE 2 16#define AER_CORRECTABLE 2
17#define DPC_FATAL 3
17 18
18struct pci_dev; 19struct pci_dev;
19 20
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 091033a6b836..e83d87fc5673 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,9 +13,6 @@ struct device_node;
13struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
14 unsigned int devfn); 14 unsigned int devfn);
15int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
16int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
17int of_get_pci_domain_nr(struct device_node *node);
18int of_pci_get_max_link_speed(struct device_node *node);
19void of_pci_check_probe_only(void); 16void of_pci_check_probe_only(void);
20int of_pci_map_rid(struct device_node *np, u32 rid, 17int of_pci_map_rid(struct device_node *np, u32 rid,
21 const char *map_name, const char *map_mask_name, 18 const char *map_name, const char *map_mask_name,
@@ -32,18 +29,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
32 return -EINVAL; 29 return -EINVAL;
33} 30}
34 31
35static inline int
36of_pci_parse_bus_range(struct device_node *node, struct resource *res)
37{
38 return -EINVAL;
39}
40
41static inline int
42of_get_pci_domain_nr(struct device_node *node)
43{
44 return -1;
45}
46
47static inline int of_pci_map_rid(struct device_node *np, u32 rid, 32static inline int of_pci_map_rid(struct device_node *np, u32 rid,
48 const char *map_name, const char *map_mask_name, 33 const char *map_name, const char *map_mask_name,
49 struct device_node **target, u32 *id_out) 34 struct device_node **target, u32 *id_out)
@@ -51,12 +36,6 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid,
51 return -EINVAL; 36 return -EINVAL;
52} 37}
53 38
54static inline int
55of_pci_get_max_link_speed(struct device_node *node)
56{
57 return -EINVAL;
58}
59
60static inline void of_pci_check_probe_only(void) { } 39static inline void of_pci_check_probe_only(void) { }
61#endif 40#endif
62 41
@@ -70,17 +49,4 @@ of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
70} 49}
71#endif 50#endif
72 51
73#if defined(CONFIG_OF_ADDRESS)
74int of_pci_get_host_bridge_resources(struct device_node *dev,
75 unsigned char busno, unsigned char bus_max,
76 struct list_head *resources, resource_size_t *io_base);
77#else
78static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
79 unsigned char busno, unsigned char bus_max,
80 struct list_head *resources, resource_size_t *io_base)
81{
82 return -EINVAL;
83}
84#endif
85
86#endif 52#endif
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index baadad1aabbc..29efa09d686b 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -62,5 +62,6 @@ extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
62/* for DT-based PCI controllers that support ECAM */ 62/* for DT-based PCI controllers that support ECAM */
63int pci_host_common_probe(struct platform_device *pdev, 63int pci_host_common_probe(struct platform_device *pdev,
64 struct pci_ecam_ops *ops); 64 struct pci_ecam_ops *ops);
65int pci_host_common_remove(struct platform_device *pdev);
65#endif 66#endif
66#endif 67#endif
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index af657ca58b70..243eaa5a66ff 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -90,8 +90,16 @@ struct pci_epc {
90 struct config_group *group; 90 struct config_group *group;
91 /* spinlock to protect against concurrent access of EP controller */ 91 /* spinlock to protect against concurrent access of EP controller */
92 spinlock_t lock; 92 spinlock_t lock;
93 unsigned int features;
93}; 94};
94 95
96#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
97#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
98#define EPC_FEATURE_SET_BAR(features, bar) \
99 (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
100#define EPC_FEATURE_GET_BAR(features) \
101 ((features & EPC_FEATURE_BAR_MASK) >> 1)
102
95#define to_pci_epc(device) container_of((device), struct pci_epc, dev) 103#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
96 104
97#define pci_epc_create(dev, ops) \ 105#define pci_epc_create(dev, ops) \
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index f7d6f4883f8b..4e7764935fa8 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -72,7 +72,7 @@ struct pci_epf_ops {
72 * @driver: PCI EPF driver 72 * @driver: PCI EPF driver
73 * @ops: set of function pointers for performing EPF operations 73 * @ops: set of function pointers for performing EPF operations
74 * @owner: the owner of the module that registers the PCI EPF driver 74 * @owner: the owner of the module that registers the PCI EPF driver
75 * @group: configfs group corresponding to the PCI EPF driver 75 * @epf_group: list of configfs group corresponding to the PCI EPF driver
76 * @id_table: identifies EPF devices for probing 76 * @id_table: identifies EPF devices for probing
77 */ 77 */
78struct pci_epf_driver { 78struct pci_epf_driver {
@@ -82,7 +82,7 @@ struct pci_epf_driver {
82 struct device_driver driver; 82 struct device_driver driver;
83 struct pci_epf_ops *ops; 83 struct pci_epf_ops *ops;
84 struct module *owner; 84 struct module *owner;
85 struct config_group *group; 85 struct list_head epf_group;
86 const struct pci_epf_device_id *id_table; 86 const struct pci_epf_device_id *id_table;
87}; 87};
88 88
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 55371cb827ad..340029b2fb38 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -217,6 +217,7 @@ enum pci_bus_flags {
217 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, 217 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
218 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, 218 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
219 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, 219 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
220 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
220}; 221};
221 222
222/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */ 223/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
@@ -406,6 +407,9 @@ struct pci_dev {
406 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 407 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
407 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 408 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
408 409
410#ifdef CONFIG_HOTPLUG_PCI_PCIE
411 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
412#endif
409#ifdef CONFIG_PCIE_PTM 413#ifdef CONFIG_PCIE_PTM
410 unsigned int ptm_root:1; 414 unsigned int ptm_root:1;
411 unsigned int ptm_enabled:1; 415 unsigned int ptm_enabled:1;
@@ -471,8 +475,10 @@ struct pci_host_bridge {
471 unsigned int ignore_reset_delay:1; /* For entire hierarchy */ 475 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
472 unsigned int no_ext_tags:1; /* No Extended Tags */ 476 unsigned int no_ext_tags:1; /* No Extended Tags */
473 unsigned int native_aer:1; /* OS may use PCIe AER */ 477 unsigned int native_aer:1; /* OS may use PCIe AER */
474 unsigned int native_hotplug:1; /* OS may use PCIe hotplug */ 478 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
479 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
475 unsigned int native_pme:1; /* OS may use PCIe PME */ 480 unsigned int native_pme:1; /* OS may use PCIe PME */
481 unsigned int native_ltr:1; /* OS may use PCIe LTR */
476 /* Resource alignment requirements */ 482 /* Resource alignment requirements */
477 resource_size_t (*align_resource)(struct pci_dev *dev, 483 resource_size_t (*align_resource)(struct pci_dev *dev,
478 const struct resource *res, 484 const struct resource *res,
@@ -1079,8 +1085,6 @@ int pcie_get_readrq(struct pci_dev *dev);
1079int pcie_set_readrq(struct pci_dev *dev, int rq); 1085int pcie_set_readrq(struct pci_dev *dev, int rq);
1080int pcie_get_mps(struct pci_dev *dev); 1086int pcie_get_mps(struct pci_dev *dev);
1081int pcie_set_mps(struct pci_dev *dev, int mps); 1087int pcie_set_mps(struct pci_dev *dev, int mps);
1082int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
1083 enum pcie_link_width *width);
1084u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, 1088u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1085 enum pci_bus_speed *speed, 1089 enum pci_bus_speed *speed,
1086 enum pcie_link_width *width); 1090 enum pcie_link_width *width);
@@ -1451,8 +1455,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1451 1455
1452#ifdef CONFIG_PCIEPORTBUS 1456#ifdef CONFIG_PCIEPORTBUS
1453extern bool pcie_ports_disabled; 1457extern bool pcie_ports_disabled;
1458extern bool pcie_ports_native;
1454#else 1459#else
1455#define pcie_ports_disabled true 1460#define pcie_ports_disabled true
1461#define pcie_ports_native false
1456#endif 1462#endif
1457 1463
1458#ifdef CONFIG_PCIEASPM 1464#ifdef CONFIG_PCIEASPM
@@ -1479,6 +1485,8 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
1479static inline void pcie_ecrc_get_policy(char *str) { } 1485static inline void pcie_ecrc_get_policy(char *str) { }
1480#endif 1486#endif
1481 1487
1488bool pci_ats_disabled(void);
1489
1482#ifdef CONFIG_PCI_ATS 1490#ifdef CONFIG_PCI_ATS
1483/* Address Translation Service */ 1491/* Address Translation Service */
1484void pci_ats_init(struct pci_dev *dev); 1492void pci_ats_init(struct pci_dev *dev);
@@ -1510,12 +1518,10 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
1510 */ 1518 */
1511#ifdef CONFIG_PCI_DOMAINS 1519#ifdef CONFIG_PCI_DOMAINS
1512extern int pci_domains_supported; 1520extern int pci_domains_supported;
1513int pci_get_new_domain_nr(void);
1514#else 1521#else
1515enum { pci_domains_supported = 0 }; 1522enum { pci_domains_supported = 0 };
1516static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1523static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1517static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 1524static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1518static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1519#endif /* CONFIG_PCI_DOMAINS */ 1525#endif /* CONFIG_PCI_DOMAINS */
1520 1526
1521/* 1527/*
@@ -1670,7 +1676,6 @@ static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1670 1676
1671static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1677static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1672static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1678static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1673static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1674 1679
1675#define dev_is_pci(d) (false) 1680#define dev_is_pci(d) (false)
1676#define dev_is_pf(d) (false) 1681#define dev_is_pf(d) (false)
@@ -1954,6 +1959,7 @@ int pci_num_vf(struct pci_dev *dev);
1954int pci_vfs_assigned(struct pci_dev *dev); 1959int pci_vfs_assigned(struct pci_dev *dev);
1955int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); 1960int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1956int pci_sriov_get_totalvfs(struct pci_dev *dev); 1961int pci_sriov_get_totalvfs(struct pci_dev *dev);
1962int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
1957resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); 1963resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
1958void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); 1964void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
1959 1965
@@ -1986,6 +1992,7 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
1986{ return 0; } 1992{ return 0; }
1987static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) 1993static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
1988{ return 0; } 1994{ return 0; }
1995#define pci_sriov_configure_simple NULL
1989static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) 1996static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
1990{ return 0; } 1997{ return 0; }
1991static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } 1998static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
@@ -2284,7 +2291,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2284 return false; 2291 return false;
2285} 2292}
2286 2293
2287#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) 2294#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2288void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); 2295void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2289#endif 2296#endif
2290 2297
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 26213024e81b..cf5e22103f68 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -162,8 +162,9 @@ struct hotplug_params {
162#ifdef CONFIG_ACPI 162#ifdef CONFIG_ACPI
163#include <linux/acpi.h> 163#include <linux/acpi.h>
164int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); 164int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
165bool pciehp_is_native(struct pci_dev *pdev); 165bool pciehp_is_native(struct pci_dev *bridge);
166int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); 166int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
167bool shpchp_is_native(struct pci_dev *bridge);
167int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); 168int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
168int acpi_pci_detect_ejectable(acpi_handle handle); 169int acpi_pci_detect_ejectable(acpi_handle handle);
169#else 170#else
@@ -172,6 +173,17 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
172{ 173{
173 return -ENODEV; 174 return -ENODEV;
174} 175}
175static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; } 176
177static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
178{
179 return 0;
180}
181static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; }
182static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
176#endif 183#endif
184
185static inline bool hotplug_is_native(struct pci_dev *bridge)
186{
187 return pciehp_is_native(bridge) || shpchp_is_native(bridge);
188}
177#endif 189#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cc608fc55334..29502238e510 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -561,6 +561,7 @@
561#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 561#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
562#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 562#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
563#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 563#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
564#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
564#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 565#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
565#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 566#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
566#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 567#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
@@ -2119,6 +2120,8 @@
2119 2120
2120#define PCI_VENDOR_ID_MYRICOM 0x14c1 2121#define PCI_VENDOR_ID_MYRICOM 0x14c1
2121 2122
2123#define PCI_VENDOR_ID_MEDIATEK 0x14c3
2124
2122#define PCI_VENDOR_ID_TITAN 0x14D2 2125#define PCI_VENDOR_ID_TITAN 0x14D2
2123#define PCI_DEVICE_ID_TITAN_010L 0x8001 2126#define PCI_DEVICE_ID_TITAN_010L 0x8001
2124#define PCI_DEVICE_ID_TITAN_100L 0x8010 2127#define PCI_DEVICE_ID_TITAN_100L 0x8010
@@ -2387,6 +2390,8 @@
2387 2390
2388#define PCI_VENDOR_ID_LENOVO 0x17aa 2391#define PCI_VENDOR_ID_LENOVO 0x17aa
2389 2392
2393#define PCI_VENDOR_ID_QCOM 0x17cb
2394
2390#define PCI_VENDOR_ID_CDNS 0x17cd 2395#define PCI_VENDOR_ID_CDNS 0x17cd
2391 2396
2392#define PCI_VENDOR_ID_ARECA 0x17d3 2397#define PCI_VENDOR_ID_ARECA 0x17d3
@@ -2552,6 +2557,8 @@
2552#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 2557#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
2553#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 2558#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
2554 2559
2560#define PCI_VENDOR_ID_AMAZON 0x1d0f
2561
2555#define PCI_VENDOR_ID_TEKRAM 0x1de1 2562#define PCI_VENDOR_ID_TEKRAM 0x1de1
2556#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2563#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
2557 2564
@@ -2672,6 +2679,7 @@
2672#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 2679#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
2673#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40 2680#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
2674#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f 2681#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
2682#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
2675#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 2683#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
2676#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f 2684#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
2677#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 2685#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2776,6 +2784,7 @@
2776#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 2784#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
2777#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e 2785#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
2778#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 2786#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
2787#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
2779#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 2788#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
2780#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 2789#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
2781#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 2790#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 9c689868eb4d..a0794632fd01 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -298,30 +298,44 @@ TRACE_EVENT(non_standard_event,
298TRACE_EVENT(aer_event, 298TRACE_EVENT(aer_event,
299 TP_PROTO(const char *dev_name, 299 TP_PROTO(const char *dev_name,
300 const u32 status, 300 const u32 status,
301 const u8 severity), 301 const u8 severity,
302 const u8 tlp_header_valid,
303 struct aer_header_log_regs *tlp),
302 304
303 TP_ARGS(dev_name, status, severity), 305 TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp),
304 306
305 TP_STRUCT__entry( 307 TP_STRUCT__entry(
306 __string( dev_name, dev_name ) 308 __string( dev_name, dev_name )
307 __field( u32, status ) 309 __field( u32, status )
308 __field( u8, severity ) 310 __field( u8, severity )
311 __field( u8, tlp_header_valid)
312 __array( u32, tlp_header, 4 )
309 ), 313 ),
310 314
311 TP_fast_assign( 315 TP_fast_assign(
312 __assign_str(dev_name, dev_name); 316 __assign_str(dev_name, dev_name);
313 __entry->status = status; 317 __entry->status = status;
314 __entry->severity = severity; 318 __entry->severity = severity;
319 __entry->tlp_header_valid = tlp_header_valid;
320 if (tlp_header_valid) {
321 __entry->tlp_header[0] = tlp->dw0;
322 __entry->tlp_header[1] = tlp->dw1;
323 __entry->tlp_header[2] = tlp->dw2;
324 __entry->tlp_header[3] = tlp->dw3;
325 }
315 ), 326 ),
316 327
317 TP_printk("%s PCIe Bus Error: severity=%s, %s\n", 328 TP_printk("%s PCIe Bus Error: severity=%s, %s, TLP Header=%s\n",
318 __get_str(dev_name), 329 __get_str(dev_name),
319 __entry->severity == AER_CORRECTABLE ? "Corrected" : 330 __entry->severity == AER_CORRECTABLE ? "Corrected" :
320 __entry->severity == AER_FATAL ? 331 __entry->severity == AER_FATAL ?
321 "Fatal" : "Uncorrected, non-fatal", 332 "Fatal" : "Uncorrected, non-fatal",
322 __entry->severity == AER_CORRECTABLE ? 333 __entry->severity == AER_CORRECTABLE ?
323 __print_flags(__entry->status, "|", aer_correctable_errors) : 334 __print_flags(__entry->status, "|", aer_correctable_errors) :
324 __print_flags(__entry->status, "|", aer_uncorrectable_errors)) 335 __print_flags(__entry->status, "|", aer_uncorrectable_errors),
336 __entry->tlp_header_valid ?
337 __print_array(__entry->tlp_header, 4, 4) :
338 "Not available")
325); 339);
326 340
327/* 341/*
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 83ade9b5cf95..4da87e2ef8a8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -657,6 +657,11 @@
657#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ 657#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
658#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */ 658#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
659#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ 659#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
660#define PCI_EXP_LNKCTL2_TLS 0x000f
661#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
662#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
663#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
664#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
660#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ 665#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
661#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ 666#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
662#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ 667#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
@@ -983,6 +988,7 @@
983#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ 988#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
984 989
985#define PCI_EXP_DPC_CTL 6 /* DPC control */ 990#define PCI_EXP_DPC_CTL 6 /* DPC control */
991#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
986#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */ 992#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
987#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */ 993#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
988 994