aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/pci/faraday,ftpci100.txt7
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt4
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt130
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.txt20
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt2
-rw-r--r--MAINTAINERS17
-rw-r--r--arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h1
-rw-r--r--arch/mips/include/asm/pci.h1
-rw-r--r--arch/mips/pci/pci-legacy.c3
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h6
-rw-r--r--arch/x86/pci/common.c27
-rw-r--r--arch/x86/pci/fixup.c47
-rw-r--r--arch/x86/pci/pcbios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c75
-rw-r--r--drivers/nvme/host/pci.c15
-rw-r--r--drivers/pci/ats.c87
-rw-r--r--drivers/pci/dwc/Kconfig11
-rw-r--r--drivers/pci/dwc/Makefile1
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c6
-rw-r--r--drivers/pci/dwc/pci-exynos.c2
-rw-r--r--drivers/pci/dwc/pci-imx6.c72
-rw-r--r--drivers/pci/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/dwc/pci-layerscape.c6
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c2
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c5
-rw-r--r--drivers/pci/dwc/pcie-designware.h2
-rw-r--r--drivers/pci/dwc/pcie-kirin.c517
-rw-r--r--drivers/pci/dwc/pcie-qcom.c440
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c2
-rw-r--r--drivers/pci/endpoint/Kconfig1
-rw-r--r--drivers/pci/host/Kconfig11
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-ftpci100.c56
-rw-r--r--drivers/pci/host/pci-hyperv.c445
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c33
-rw-r--r--drivers/pci/host/pci-versatile.c18
-rw-r--r--drivers/pci/host/pcie-mediatek.c554
-rw-r--r--drivers/pci/host/pcie-rockchip.c125
-rw-r--r--drivers/pci/host/vmd.c3
-rw-r--r--drivers/pci/iov.c4
-rw-r--r--drivers/pci/msi.c14
-rw-r--r--drivers/pci/pci-driver.c1
-rw-r--r--drivers/pci/pci-label.c7
-rw-r--r--drivers/pci/pci-sysfs.c204
-rw-r--r--drivers/pci/pci.c230
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/pcie-dpc.c4
-rw-r--r--drivers/pci/pcie/portdrv.h7
-rw-r--r--drivers/pci/pcie/portdrv_core.c104
-rw-r--r--drivers/pci/probe.c35
-rw-r--r--drivers/pci/quirks.c19
-rw-r--r--drivers/pci/switch/switchtec.c56
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/pci-ats.h10
-rw-r--r--include/linux/pci.h28
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/switchtec_ioctl.h3
-rw-r--r--kernel/irq/affinity.c13
64 files changed, 3018 insertions, 543 deletions
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
index 35d4a979bb7b..89a84f8aa621 100644
--- a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -30,6 +30,13 @@ Mandatory properties:
30 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as 30 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
31 pre-fetchable. 31 pre-fetchable.
32 32
33Optional properties:
34- clocks: when present, this should contain the peripheral clock (PCLK) and the
35 PCI clock (PCICLK). If these are not present, they are assumed to be
36 hard-wired enabled and always on. The PCI clock will be 33 or 66 MHz.
37- clock-names: when present, this should contain "PCLK" for the peripheral
38 clock and "PCICLK" for the PCI-side clock.
39
33Mandatory subnodes: 40Mandatory subnodes:
34- For "faraday,ftpci100" a node representing the interrupt-controller inside the 41- For "faraday,ftpci100" a node representing the interrupt-controller inside the
35 host bridge is mandatory. It has the following mandatory properties: 42 host bridge is mandatory. It has the following mandatory properties:
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index e3d5680875b1..cf92d3ba5a26 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -33,6 +33,10 @@ Optional properties:
33- reset-gpio-active-high: If present then the reset sequence using the GPIO 33- reset-gpio-active-high: If present then the reset sequence using the GPIO
34 specified in the "reset-gpio" property is reversed (H=reset state, 34 specified in the "reset-gpio" property is reversed (H=reset state,
35 L=operation state). 35 L=operation state).
36- vpcie-supply: Should specify the regulator in charge of PCIe port power.
37 The regulator will be enabled when initializing the PCIe host and
38 disabled either as part of the init process or when shutting down the
39 host.
36 40
37Additional required properties for imx6sx-pcie: 41Additional required properties for imx6sx-pcie:
38- clock names: Must include the following additional entries: 42- clock names: Must include the following additional entries:
diff --git a/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt
new file mode 100644
index 000000000000..fe80dda9bf73
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt
@@ -0,0 +1,130 @@
1MediaTek Gen2 PCIe controller which is available on MT7623 series SoCs
2
3PCIe subsys supports single root complex (RC) with 3 Root Ports. Each root
4ports supports a Gen2 1-lane Link and has PIPE interface to PHY.
5
6Required properties:
7- compatible: Should contain "mediatek,mt7623-pcie".
8- device_type: Must be "pci"
9- reg: Base addresses and lengths of the PCIe controller.
10- #address-cells: Address representation for root ports (must be 3)
11- #size-cells: Size representation for root ports (must be 2)
12- #interrupt-cells: Size representation for interrupts (must be 1)
13- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
14 Please refer to the standard PCI bus binding document for a more detailed
15 explanation.
16- clocks: Must contain an entry for each entry in clock-names.
17 See ../clocks/clock-bindings.txt for details.
18- clock-names: Must include the following entries:
19 - free_ck :for reference clock of PCIe subsys
20 - sys_ck0 :for clock of Port0
21 - sys_ck1 :for clock of Port1
22 - sys_ck2 :for clock of Port2
23- resets: Must contain an entry for each entry in reset-names.
24 See ../reset/reset.txt for details.
25- reset-names: Must include the following entries:
26 - pcie-rst0 :port0 reset
27 - pcie-rst1 :port1 reset
28 - pcie-rst2 :port2 reset
29- phys: List of PHY specifiers (used by generic PHY framework).
30- phy-names : Must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
31 number of PHYs as specified in *phys* property.
32- power-domains: A phandle and power domain specifier pair to the power domain
33 which is responsible for collapsing and restoring power to the peripheral.
34- bus-range: Range of bus numbers associated with this controller.
35- ranges: Ranges for the PCI memory and I/O regions.
36
37In addition, the device tree node must have sub-nodes describing each
38PCIe port interface, having the following mandatory properties:
39
40Required properties:
41- device_type: Must be "pci"
42- reg: Only the first four bytes are used to refer to the correct bus number
43 and device number.
44- #address-cells: Must be 3
45- #size-cells: Must be 2
46- #interrupt-cells: Must be 1
47- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
48 Please refer to the standard PCI bus binding document for a more detailed
49 explanation.
50- ranges: Sub-ranges distributed from the PCIe controller node. An empty
51 property is sufficient.
52- num-lanes: Number of lanes to use for this port.
53
54Examples:
55
56 hifsys: syscon@1a000000 {
57 compatible = "mediatek,mt7623-hifsys",
58 "mediatek,mt2701-hifsys",
59 "syscon";
60 reg = <0 0x1a000000 0 0x1000>;
61 #clock-cells = <1>;
62 #reset-cells = <1>;
63 };
64
65 pcie: pcie-controller@1a140000 {
66 compatible = "mediatek,mt7623-pcie";
67 device_type = "pci";
68 reg = <0 0x1a140000 0 0x1000>, /* PCIe shared registers */
69 <0 0x1a142000 0 0x1000>, /* Port0 registers */
70 <0 0x1a143000 0 0x1000>, /* Port1 registers */
71 <0 0x1a144000 0 0x1000>; /* Port2 registers */
72 #address-cells = <3>;
73 #size-cells = <2>;
74 #interrupt-cells = <1>;
75 interrupt-map-mask = <0xf800 0 0 0>;
76 interrupt-map = <0x0000 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>,
77 <0x0800 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>,
78 <0x1000 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
79 clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
80 <&hifsys CLK_HIFSYS_PCIE0>,
81 <&hifsys CLK_HIFSYS_PCIE1>,
82 <&hifsys CLK_HIFSYS_PCIE2>;
83 clock-names = "free_ck", "sys_ck0", "sys_ck1", "sys_ck2";
84 resets = <&hifsys MT2701_HIFSYS_PCIE0_RST>,
85 <&hifsys MT2701_HIFSYS_PCIE1_RST>,
86 <&hifsys MT2701_HIFSYS_PCIE2_RST>;
87 reset-names = "pcie-rst0", "pcie-rst1", "pcie-rst2";
88 phys = <&pcie0_phy>, <&pcie1_phy>, <&pcie2_phy>;
89 phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
90 power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>;
91 bus-range = <0x00 0xff>;
92 ranges = <0x81000000 0 0x1a160000 0 0x1a160000 0 0x00010000 /* I/O space */
93 0x83000000 0 0x60000000 0 0x60000000 0 0x10000000>; /* memory space */
94
95 pcie@0,0 {
96 device_type = "pci";
97 reg = <0x0000 0 0 0 0>;
98 #address-cells = <3>;
99 #size-cells = <2>;
100 #interrupt-cells = <1>;
101 interrupt-map-mask = <0 0 0 0>;
102 interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
103 ranges;
104 num-lanes = <1>;
105 };
106
107 pcie@1,0 {
108 device_type = "pci";
109 reg = <0x0800 0 0 0 0>;
110 #address-cells = <3>;
111 #size-cells = <2>;
112 #interrupt-cells = <1>;
113 interrupt-map-mask = <0 0 0 0>;
114 interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
115 ranges;
116 num-lanes = <1>;
117 };
118
119 pcie@2,0 {
120 device_type = "pci";
121 reg = <0x1000 0 0 0 0>;
122 #address-cells = <3>;
123 #size-cells = <2>;
124 #interrupt-cells = <1>;
125 interrupt-map-mask = <0 0 0 0>;
126 interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
127 ranges;
128 num-lanes = <1>;
129 };
130 };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index e15f9b19901f..9d418b71774f 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -8,6 +8,7 @@
8 - "qcom,pcie-apq8064" for apq8064 8 - "qcom,pcie-apq8064" for apq8064
9 - "qcom,pcie-apq8084" for apq8084 9 - "qcom,pcie-apq8084" for apq8084
10 - "qcom,pcie-msm8996" for msm8996 or apq8096 10 - "qcom,pcie-msm8996" for msm8996 or apq8096
11 - "qcom,pcie-ipq4019" for ipq4019
11 12
12- reg: 13- reg:
13 Usage: required 14 Usage: required
@@ -87,7 +88,7 @@
87 - "core" Clocks the pcie hw block 88 - "core" Clocks the pcie hw block
88 - "phy" Clocks the pcie PHY block 89 - "phy" Clocks the pcie PHY block
89- clock-names: 90- clock-names:
90 Usage: required for apq8084 91 Usage: required for apq8084/ipq4019
91 Value type: <stringlist> 92 Value type: <stringlist>
92 Definition: Should contain the following entries 93 Definition: Should contain the following entries
93 - "aux" Auxiliary (AUX) clock 94 - "aux" Auxiliary (AUX) clock
@@ -126,6 +127,23 @@
126 Definition: Should contain the following entries 127 Definition: Should contain the following entries
127 - "core" Core reset 128 - "core" Core reset
128 129
130- reset-names:
131 Usage: required for ipq/apq8064
132 Value type: <stringlist>
133 Definition: Should contain the following entries
134 - "axi_m" AXI master reset
135 - "axi_s" AXI slave reset
136 - "pipe" PIPE reset
137 - "axi_m_vmid" VMID reset
138 - "axi_s_xpu" XPU reset
139 - "parf" PARF reset
140 - "phy" PHY reset
141 - "axi_m_sticky" AXI sticky reset
142 - "pipe_sticky" PIPE sticky reset
143 - "pwr" PWR reset
144 - "ahb" AHB reset
145 - "phy_ahb" PHY AHB reset
146
129- power-domains: 147- power-domains:
130 Usage: required for apq8084 and msm8996/apq8096 148 Usage: required for apq8084 and msm8996/apq8096
131 Value type: <prop-encoded-array> 149 Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 34712d6fd253..bd27428dda61 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -1,4 +1,4 @@
1* Renesas RCar PCIe interface 1* Renesas R-Car PCIe interface
2 2
3Required properties: 3Required properties:
4compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC; 4compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
diff --git a/MAINTAINERS b/MAINTAINERS
index f7d568b8f133..82b19f5ae5ad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9933,9 +9933,16 @@ S: Maintained
9933F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt 9933F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
9934F: drivers/pci/dwc/pcie-hisi.c 9934F: drivers/pci/dwc/pcie-hisi.c
9935 9935
9936PCIE DRIVER FOR HISILICON KIRIN
9937M: Xiaowei Song <songxiaowei@hisilicon.com>
9938M: Binghui Wang <wangbinghui@hisilicon.com>
9939L: linux-pci@vger.kernel.org
9940S: Maintained
9941F: Documentation/devicetree/bindings/pci/pcie-kirin.txt
9942F: drivers/pci/dwc/pcie-kirin.c
9943
9936PCIE DRIVER FOR ROCKCHIP 9944PCIE DRIVER FOR ROCKCHIP
9937M: Shawn Lin <shawn.lin@rock-chips.com> 9945M: Shawn Lin <shawn.lin@rock-chips.com>
9938M: Wenrui Li <wenrui.li@rock-chips.com>
9939L: linux-pci@vger.kernel.org 9946L: linux-pci@vger.kernel.org
9940L: linux-rockchip@lists.infradead.org 9947L: linux-rockchip@lists.infradead.org
9941S: Maintained 9948S: Maintained
@@ -9957,6 +9964,14 @@ S: Supported
9957F: Documentation/devicetree/bindings/pci/pci-thunder-* 9964F: Documentation/devicetree/bindings/pci/pci-thunder-*
9958F: drivers/pci/host/pci-thunder-* 9965F: drivers/pci/host/pci-thunder-*
9959 9966
9967PCIE DRIVER FOR MEDIATEK
9968M: Ryder Lee <ryder.lee@mediatek.com>
9969L: linux-pci@vger.kernel.org
9970L: linux-mediatek@lists.infradead.org
9971S: Supported
9972F: Documentation/devicetree/bindings/pci/mediatek*
9973F: drivers/pci/host/*mediatek*
9974
9960PCMCIA SUBSYSTEM 9975PCMCIA SUBSYSTEM
9961P: Linux PCMCIA Team 9976P: Linux PCMCIA Team
9962L: linux-pcmcia@lists.infradead.org 9977L: linux-pcmcia@lists.infradead.org
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
index 8a7ecb4d5c64..bf9dd9eb4ceb 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
@@ -80,7 +80,6 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
80#define PCI_BAR3_REG 0x1c 80#define PCI_BAR3_REG 0x1c
81#define PCI_BAR4_REG 0x20 81#define PCI_BAR4_REG 0x20
82#define PCI_BAR5_REG 0x24 82#define PCI_BAR5_REG 0x24
83#define PCI_BAR_COUNT 6
84#define PCI_BAR_RANGE_MASK 0xFFFFFFFF 83#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
85 84
86/* CARDBUS CIS POINTER */ 85/* CARDBUS CIS POINTER */
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 1000c1b4c875..52f551ee492d 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -39,7 +39,6 @@ struct pci_controller {
39 unsigned long io_offset; 39 unsigned long io_offset;
40 unsigned long io_map_base; 40 unsigned long io_map_base;
41 struct resource *busn_resource; 41 struct resource *busn_resource;
42 unsigned long busn_offset;
43 42
44#ifndef CONFIG_PCI_DOMAINS_GENERIC 43#ifndef CONFIG_PCI_DOMAINS_GENERIC
45 unsigned int index; 44 unsigned int index;
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 3a84f6c0c840..174575a9a112 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -86,8 +86,7 @@ static void pcibios_scanbus(struct pci_controller *hose)
86 hose->mem_resource, hose->mem_offset); 86 hose->mem_resource, hose->mem_offset);
87 pci_add_resource_offset(&resources, 87 pci_add_resource_offset(&resources,
88 hose->io_resource, hose->io_offset); 88 hose->io_resource, hose->io_offset);
89 pci_add_resource_offset(&resources, 89 pci_add_resource(&resources, hose->busn_resource);
90 hose->busn_resource, hose->busn_offset);
91 bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose, 90 bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
92 &resources); 91 &resources);
93 hose->bus = bus; 92 hose->bus = bus;
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 432df4b1baec..237ec6cda206 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -153,6 +153,12 @@
153#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) 153#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
154 154
155/* 155/*
156 * HV_VP_SET available
157 */
158#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
159
160
161/*
156 * Crash notification flag. 162 * Crash notification flag.
157 */ 163 */
158#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63) 164#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 190e718694b1..1f9f2ee7c421 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -24,7 +24,6 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
24 24
25unsigned int pci_early_dump_regs; 25unsigned int pci_early_dump_regs;
26static int pci_bf_sort; 26static int pci_bf_sort;
27static int smbios_type_b1_flag;
28int pci_routeirq; 27int pci_routeirq;
29int noioapicquirk; 28int noioapicquirk;
30#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS 29#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -197,34 +196,18 @@ static int __init set_bf_sort(const struct dmi_system_id *d)
197static void __init read_dmi_type_b1(const struct dmi_header *dm, 196static void __init read_dmi_type_b1(const struct dmi_header *dm,
198 void *private_data) 197 void *private_data)
199{ 198{
200 u8 *d = (u8 *)dm + 4; 199 u8 *data = (u8 *)dm + 4;
201 200
202 if (dm->type != 0xB1) 201 if (dm->type != 0xB1)
203 return; 202 return;
204 switch (((*(u32 *)d) >> 9) & 0x03) { 203 if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
205 case 0x00: 204 set_bf_sort((const struct dmi_system_id *)private_data);
206 printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n");
207 break;
208 case 0x01: /* set pci=bfsort */
209 smbios_type_b1_flag = 1;
210 break;
211 case 0x02: /* do not set pci=bfsort */
212 smbios_type_b1_flag = 2;
213 break;
214 default:
215 break;
216 }
217} 205}
218 206
219static int __init find_sort_method(const struct dmi_system_id *d) 207static int __init find_sort_method(const struct dmi_system_id *d)
220{ 208{
221 dmi_walk(read_dmi_type_b1, NULL); 209 dmi_walk(read_dmi_type_b1, (void *)d);
222 210 return 0;
223 if (smbios_type_b1_flag == 1) {
224 set_bf_sort(d);
225 return 0;
226 }
227 return -1;
228} 211}
229 212
230/* 213/*
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6d52b94f4bb9..11e407489db0 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -571,3 +571,50 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
571DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); 571DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
572DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); 572DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
573DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); 573DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
574
575/*
576 * Device [1022:7808]
577 * 23. USB Wake on Connect/Disconnect with Low Speed Devices
578 * https://support.amd.com/TechDocs/46837.pdf
579 * Appendix A2
580 * https://support.amd.com/TechDocs/42413.pdf
581 */
582static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
583{
584 dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
585 dev->pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold)
586 >> PCI_PM_CAP_PME_SHIFT);
587}
588DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
589
590/*
591 * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
592 *
593 * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
594 * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
595 * for soft poweroff and suspend-to-RAM.
596 *
597 * As far as we know, this is related to the address space, not to the Root
598 * Port itself. Attaching the quirk to the Root Port is a convenience, but
599 * it could probably also be a standalone DMI quirk.
600 *
601 * https://bugzilla.kernel.org/show_bug.cgi?id=103211
602 */
603static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
604{
605 struct device *dev = &pdev->dev;
606 struct resource *res;
607
608 if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
609 !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
610 pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
611 return;
612
613 res = request_mem_region(0x7fa00000, 0x200000,
614 "MacBook Pro poweroff workaround");
615 if (res)
616 dev_info(dev, "claimed %s %pR\n", res->name, res);
617 else
618 dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
619}
620DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index c1bdb9edcae7..76595408ff53 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -46,7 +46,7 @@ static inline void set_bios_x(void)
46 pcibios_enabled = 1; 46 pcibios_enabled = 1;
47 set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT); 47 set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
48 if (__supported_pte_mask & _PAGE_NX) 48 if (__supported_pte_mask & _PAGE_NX)
49 printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n"); 49 printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
50} 50}
51 51
52/* 52/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 43ca16b6eee2..bbac5d5d1fcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1152,16 +1152,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
1152 return; 1152 return;
1153 1153
1154 if (state == VGA_SWITCHEROO_ON) { 1154 if (state == VGA_SWITCHEROO_ON) {
1155 unsigned d3_delay = dev->pdev->d3_delay;
1156
1157 pr_info("amdgpu: switched on\n"); 1155 pr_info("amdgpu: switched on\n");
1158 /* don't suspend or resume card normally */ 1156 /* don't suspend or resume card normally */
1159 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1157 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1160 1158
1161 amdgpu_device_resume(dev, true, true); 1159 amdgpu_device_resume(dev, true, true);
1162 1160
1163 dev->pdev->d3_delay = d3_delay;
1164
1165 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1161 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1166 drm_kms_helper_poll_enable(dev); 1162 drm_kms_helper_poll_enable(dev);
1167 } else { 1163 } else {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf42783d4b..aecaafbc8417 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; }
113#endif 113#endif
114 114
115#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 115#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
116#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
117 116
118struct radeon_px_quirk { 117struct radeon_px_quirk {
119 u32 chip_vendor; 118 u32 chip_vendor;
@@ -136,8 +135,6 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381 135 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
137 */ 136 */
138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 137 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* macbook pro 8.2 */
140 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
141 { 0, 0, 0, 0, 0 }, 138 { 0, 0, 0, 0, 0 },
142}; 139};
143 140
@@ -1241,25 +1238,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1241static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 1238static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1242{ 1239{
1243 struct drm_device *dev = pci_get_drvdata(pdev); 1240 struct drm_device *dev = pci_get_drvdata(pdev);
1244 struct radeon_device *rdev = dev->dev_private;
1245 1241
1246 if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) 1242 if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1247 return; 1243 return;
1248 1244
1249 if (state == VGA_SWITCHEROO_ON) { 1245 if (state == VGA_SWITCHEROO_ON) {
1250 unsigned d3_delay = dev->pdev->d3_delay;
1251
1252 pr_info("radeon: switched on\n"); 1246 pr_info("radeon: switched on\n");
1253 /* don't suspend or resume card normally */ 1247 /* don't suspend or resume card normally */
1254 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1248 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1255 1249
1256 if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
1257 dev->pdev->d3_delay = 20;
1258
1259 radeon_resume_kms(dev, true, true); 1250 radeon_resume_kms(dev, true, true);
1260 1251
1261 dev->pdev->d3_delay = d3_delay;
1262
1263 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1252 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1264 drm_kms_helper_poll_enable(dev); 1253 drm_kms_helper_poll_enable(dev);
1265 } else { 1254 } else {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 3e26d27ad213..63784576ae8b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2348,30 +2348,19 @@ static void fm10k_io_resume(struct pci_dev *pdev)
2348 netif_device_attach(netdev); 2348 netif_device_attach(netdev);
2349} 2349}
2350 2350
2351/** 2351static void fm10k_io_reset_prepare(struct pci_dev *pdev)
2352 * fm10k_io_reset_notify - called when PCI function is reset
2353 * @pdev: Pointer to PCI device
2354 *
2355 * This callback is called when the PCI function is reset such as from
2356 * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
2357 * means we should prepare for a function reset. If prepare is false, it means
2358 * the function reset just occurred.
2359 */
2360static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
2361{ 2352{
2362 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2353 /* warn incase we have any active VF devices */
2363 int err = 0; 2354 if (pci_num_vf(pdev))
2364 2355 dev_warn(&pdev->dev,
2365 if (prepare) { 2356 "PCIe FLR may cause issues for any active VF devices\n");
2366 /* warn incase we have any active VF devices */ 2357 fm10k_prepare_suspend(pci_get_drvdata(pdev));
2367 if (pci_num_vf(pdev)) 2358}
2368 dev_warn(&pdev->dev,
2369 "PCIe FLR may cause issues for any active VF devices\n");
2370 2359
2371 fm10k_prepare_suspend(interface); 2360static void fm10k_io_reset_done(struct pci_dev *pdev)
2372 } else { 2361{
2373 err = fm10k_handle_resume(interface); 2362 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2374 } 2363 int err = fm10k_handle_resume(interface);
2375 2364
2376 if (err) { 2365 if (err) {
2377 dev_warn(&pdev->dev, 2366 dev_warn(&pdev->dev,
@@ -2384,7 +2373,8 @@ static const struct pci_error_handlers fm10k_err_handler = {
2384 .error_detected = fm10k_io_error_detected, 2373 .error_detected = fm10k_io_error_detected,
2385 .slot_reset = fm10k_io_slot_reset, 2374 .slot_reset = fm10k_io_slot_reset,
2386 .resume = fm10k_io_resume, 2375 .resume = fm10k_io_resume,
2387 .reset_notify = fm10k_io_reset_notify, 2376 .reset_prepare = fm10k_io_reset_prepare,
2377 .reset_done = fm10k_io_reset_done,
2388}; 2378};
2389 2379
2390static struct pci_driver fm10k_driver = { 2380static struct pci_driver fm10k_driver = {
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index ac62bce50e96..279adf124fc9 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -346,11 +346,13 @@ static const struct pci_device_id mwifiex_ids[] = {
346 346
347MODULE_DEVICE_TABLE(pci, mwifiex_ids); 347MODULE_DEVICE_TABLE(pci, mwifiex_ids);
348 348
349static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) 349/*
350 * Cleanup all software without cleaning anything related to PCIe and HW.
351 */
352static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
350{ 353{
351 struct pcie_service_card *card = pci_get_drvdata(pdev); 354 struct pcie_service_card *card = pci_get_drvdata(pdev);
352 struct mwifiex_adapter *adapter = card->adapter; 355 struct mwifiex_adapter *adapter = card->adapter;
353 int ret;
354 356
355 if (!adapter) { 357 if (!adapter) {
356 dev_err(&pdev->dev, "%s: adapter structure is not valid\n", 358 dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
@@ -359,37 +361,48 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
359 } 361 }
360 362
361 mwifiex_dbg(adapter, INFO, 363 mwifiex_dbg(adapter, INFO,
362 "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n", 364 "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n",
363 __func__, pdev->vendor, pdev->device, 365 __func__, pdev->vendor, pdev->device, pdev->revision);
364 pdev->revision, 366
365 prepare ? "Pre-FLR" : "Post-FLR"); 367 mwifiex_shutdown_sw(adapter);
366 368 adapter->surprise_removed = true;
367 if (prepare) { 369 clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
368 /* Kernel would be performing FLR after this notification. 370 clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
369 * Cleanup all software without cleaning anything related to
370 * PCIe and HW.
371 */
372 mwifiex_shutdown_sw(adapter);
373 adapter->surprise_removed = true;
374 clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
375 clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
376 } else {
377 /* Kernel stores and restores PCIe function context before and
378 * after performing FLR respectively. Reconfigure the software
379 * and firmware including firmware redownload
380 */
381 adapter->surprise_removed = false;
382 ret = mwifiex_reinit_sw(adapter);
383 if (ret) {
384 dev_err(&pdev->dev, "reinit failed: %d\n", ret);
385 return;
386 }
387 }
388 mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); 371 mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
389} 372}
390 373
391static const struct pci_error_handlers mwifiex_pcie_err_handler[] = { 374/*
392 { .reset_notify = mwifiex_pcie_reset_notify, }, 375 * Kernel stores and restores PCIe function context before and after performing
376 * FLR respectively. Reconfigure the software and firmware including firmware
377 * redownload.
378 */
379static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
380{
381 struct pcie_service_card *card = pci_get_drvdata(pdev);
382 struct mwifiex_adapter *adapter = card->adapter;
383 int ret;
384
385 if (!adapter) {
386 dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
387 __func__);
388 return;
389 }
390
391 mwifiex_dbg(adapter, INFO,
392 "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n",
393 __func__, pdev->vendor, pdev->device, pdev->revision);
394
395 adapter->surprise_removed = false;
396 ret = mwifiex_reinit_sw(adapter);
397 if (ret)
398 dev_err(&pdev->dev, "reinit failed: %d\n", ret);
399 else
400 mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
401}
402
403static const struct pci_error_handlers mwifiex_pcie_err_handler = {
404 .reset_prepare = mwifiex_pcie_reset_prepare,
405 .reset_done = mwifiex_pcie_reset_done,
393}; 406};
394 407
395#ifdef CONFIG_PM_SLEEP 408#ifdef CONFIG_PM_SLEEP
@@ -410,7 +423,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
410 }, 423 },
411#endif 424#endif
412 .shutdown = mwifiex_pcie_shutdown, 425 .shutdown = mwifiex_pcie_shutdown,
413 .err_handler = mwifiex_pcie_err_handler, 426 .err_handler = &mwifiex_pcie_err_handler,
414}; 427};
415 428
416/* 429/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fed803232edc..9a3d69b8df98 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2145,14 +2145,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2145 return result; 2145 return result;
2146} 2146}
2147 2147
2148static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) 2148static void nvme_reset_prepare(struct pci_dev *pdev)
2149{ 2149{
2150 struct nvme_dev *dev = pci_get_drvdata(pdev); 2150 nvme_dev_disable(pci_get_drvdata(pdev), false);
2151}
2151 2152
2152 if (prepare) 2153static void nvme_reset_done(struct pci_dev *pdev)
2153 nvme_dev_disable(dev, false); 2154{
2154 else 2155 nvme_reset(pci_get_drvdata(pdev));
2155 nvme_reset(dev);
2156} 2156}
2157 2157
2158static void nvme_shutdown(struct pci_dev *pdev) 2158static void nvme_shutdown(struct pci_dev *pdev)
@@ -2275,7 +2275,8 @@ static const struct pci_error_handlers nvme_err_handler = {
2275 .error_detected = nvme_error_detected, 2275 .error_detected = nvme_error_detected,
2276 .slot_reset = nvme_slot_reset, 2276 .slot_reset = nvme_slot_reset,
2277 .resume = nvme_error_resume, 2277 .resume = nvme_error_resume,
2278 .reset_notify = nvme_reset_notify, 2278 .reset_prepare = nvme_reset_prepare,
2279 .reset_done = nvme_reset_done,
2279}; 2280};
2280 2281
2281static const struct pci_device_id nvme_id_table[] = { 2282static const struct pci_device_id nvme_id_table[] = {
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index eeb9fb2b47aa..ad8ddbbbf245 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
153 u32 max_requests; 153 u32 max_requests;
154 int pos; 154 int pos;
155 155
156 if (WARN_ON(pdev->pri_enabled))
157 return -EBUSY;
158
156 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); 159 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
157 if (!pos) 160 if (!pos)
158 return -EINVAL; 161 return -EINVAL;
159 162
160 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
161 pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); 163 pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
162 if ((control & PCI_PRI_CTRL_ENABLE) || 164 if (!(status & PCI_PRI_STATUS_STOPPED))
163 !(status & PCI_PRI_STATUS_STOPPED))
164 return -EBUSY; 165 return -EBUSY;
165 166
166 pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests); 167 pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
167 reqs = min(max_requests, reqs); 168 reqs = min(max_requests, reqs);
169 pdev->pri_reqs_alloc = reqs;
168 pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); 170 pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
169 171
170 control |= PCI_PRI_CTRL_ENABLE; 172 control = PCI_PRI_CTRL_ENABLE;
171 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); 173 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
172 174
175 pdev->pri_enabled = 1;
176
173 return 0; 177 return 0;
174} 178}
175EXPORT_SYMBOL_GPL(pci_enable_pri); 179EXPORT_SYMBOL_GPL(pci_enable_pri);
@@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev)
185 u16 control; 189 u16 control;
186 int pos; 190 int pos;
187 191
192 if (WARN_ON(!pdev->pri_enabled))
193 return;
194
188 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); 195 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
189 if (!pos) 196 if (!pos)
190 return; 197 return;
@@ -192,10 +199,34 @@ void pci_disable_pri(struct pci_dev *pdev)
192 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); 199 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
193 control &= ~PCI_PRI_CTRL_ENABLE; 200 control &= ~PCI_PRI_CTRL_ENABLE;
194 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); 201 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
202
203 pdev->pri_enabled = 0;
195} 204}
196EXPORT_SYMBOL_GPL(pci_disable_pri); 205EXPORT_SYMBOL_GPL(pci_disable_pri);
197 206
198/** 207/**
208 * pci_restore_pri_state - Restore PRI
209 * @pdev: PCI device structure
210 */
211void pci_restore_pri_state(struct pci_dev *pdev)
212{
213 u16 control = PCI_PRI_CTRL_ENABLE;
214 u32 reqs = pdev->pri_reqs_alloc;
215 int pos;
216
217 if (!pdev->pri_enabled)
218 return;
219
220 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
221 if (!pos)
222 return;
223
224 pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
225 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
226}
227EXPORT_SYMBOL_GPL(pci_restore_pri_state);
228
229/**
199 * pci_reset_pri - Resets device's PRI state 230 * pci_reset_pri - Resets device's PRI state
200 * @pdev: PCI device structure 231 * @pdev: PCI device structure
201 * 232 *
@@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev)
207 u16 control; 238 u16 control;
208 int pos; 239 int pos;
209 240
241 if (WARN_ON(pdev->pri_enabled))
242 return -EBUSY;
243
210 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); 244 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
211 if (!pos) 245 if (!pos)
212 return -EINVAL; 246 return -EINVAL;
213 247
214 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); 248 control = PCI_PRI_CTRL_RESET;
215 if (control & PCI_PRI_CTRL_ENABLE)
216 return -EBUSY;
217
218 control |= PCI_PRI_CTRL_RESET;
219
220 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); 249 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
221 250
222 return 0; 251 return 0;
@@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
239 u16 control, supported; 268 u16 control, supported;
240 int pos; 269 int pos;
241 270
271 if (WARN_ON(pdev->pasid_enabled))
272 return -EBUSY;
273
242 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); 274 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
243 if (!pos) 275 if (!pos)
244 return -EINVAL; 276 return -EINVAL;
245 277
246 pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
247 pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported); 278 pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
248
249 if (control & PCI_PASID_CTRL_ENABLE)
250 return -EINVAL;
251
252 supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; 279 supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
253 280
254 /* User wants to enable anything unsupported? */ 281 /* User wants to enable anything unsupported? */
@@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
256 return -EINVAL; 283 return -EINVAL;
257 284
258 control = PCI_PASID_CTRL_ENABLE | features; 285 control = PCI_PASID_CTRL_ENABLE | features;
286 pdev->pasid_features = features;
259 287
260 pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); 288 pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
261 289
290 pdev->pasid_enabled = 1;
291
262 return 0; 292 return 0;
263} 293}
264EXPORT_SYMBOL_GPL(pci_enable_pasid); 294EXPORT_SYMBOL_GPL(pci_enable_pasid);
@@ -266,22 +296,47 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
266/** 296/**
267 * pci_disable_pasid - Disable the PASID capability 297 * pci_disable_pasid - Disable the PASID capability
268 * @pdev: PCI device structure 298 * @pdev: PCI device structure
269 *
270 */ 299 */
271void pci_disable_pasid(struct pci_dev *pdev) 300void pci_disable_pasid(struct pci_dev *pdev)
272{ 301{
273 u16 control = 0; 302 u16 control = 0;
274 int pos; 303 int pos;
275 304
305 if (WARN_ON(!pdev->pasid_enabled))
306 return;
307
276 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); 308 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
277 if (!pos) 309 if (!pos)
278 return; 310 return;
279 311
280 pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); 312 pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
313
314 pdev->pasid_enabled = 0;
281} 315}
282EXPORT_SYMBOL_GPL(pci_disable_pasid); 316EXPORT_SYMBOL_GPL(pci_disable_pasid);
283 317
284/** 318/**
319 * pci_restore_pasid_state - Restore PASID capabilities
320 * @pdev: PCI device structure
321 */
322void pci_restore_pasid_state(struct pci_dev *pdev)
323{
324 u16 control;
325 int pos;
326
327 if (!pdev->pasid_enabled)
328 return;
329
330 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
331 if (!pos)
332 return;
333
334 control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
335 pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
336}
337EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
338
339/**
285 * pci_pasid_features - Check which PASID features are supported 340 * pci_pasid_features - Check which PASID features are supported
286 * @pdev: PCI device structure 341 * @pdev: PCI device structure
287 * 342 *
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index b7e15526d676..d275aadc47ee 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -16,6 +16,7 @@ config PCIE_DW_EP
16 16
17config PCI_DRA7XX 17config PCI_DRA7XX
18 bool "TI DRA7xx PCIe controller" 18 bool "TI DRA7xx PCIe controller"
19 depends on SOC_DRA7XX || COMPILE_TEST
19 depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT 20 depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
20 depends on OF && HAS_IOMEM && TI_PIPE3 21 depends on OF && HAS_IOMEM && TI_PIPE3
21 help 22 help
@@ -158,4 +159,14 @@ config PCIE_ARTPEC6
158 Say Y here to enable PCIe controller support on Axis ARTPEC-6 159 Say Y here to enable PCIe controller support on Axis ARTPEC-6
159 SoCs. This PCIe controller uses the DesignWare core. 160 SoCs. This PCIe controller uses the DesignWare core.
160 161
162config PCIE_KIRIN
163 depends on OF && ARM64
164 bool "HiSilicon Kirin series SoCs PCIe controllers"
165 depends on PCI
166 select PCIEPORTBUS
167 select PCIE_DW_HOST
168 help
169 Say Y here if you want PCIe controller support
170 on HiSilicon Kirin series SoCs.
171
161endmenu 172endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index f31a8596442a..c61be9738cce 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
13obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o 13obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
14obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o 14obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
15obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o 15obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
16obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
16 17
17# The following drivers are for devices that use the generic ACPI 18# The following drivers are for devices that use the generic ACPI
18# pci_root.c driver but don't support standard ECAM config access. 19# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 8decf46cf525..f2fc5f47064e 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
174static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) 174static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
175{ 175{
176 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, 176 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
177 ~LEG_EP_INTERRUPTS & ~MSI); 177 LEG_EP_INTERRUPTS | MSI);
178 178
179 dra7xx_pcie_writel(dra7xx, 179 dra7xx_pcie_writel(dra7xx,
180 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, 180 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
@@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
184static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) 184static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
185{ 185{
186 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, 186 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
187 ~INTERRUPTS); 187 INTERRUPTS);
188 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, 188 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
189 INTERRUPTS); 189 INTERRUPTS);
190} 190}
@@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
208 dra7xx_pcie_enable_interrupts(dra7xx); 208 dra7xx_pcie_enable_interrupts(dra7xx);
209} 209}
210 210
211static struct dw_pcie_host_ops dra7xx_pcie_host_ops = { 211static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
212 .host_init = dra7xx_pcie_host_init, 212 .host_init = dra7xx_pcie_host_init,
213}; 213};
214 214
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 546082ad5a3f..c78c06552590 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp)
590 exynos_pcie_enable_interrupts(ep); 590 exynos_pcie_enable_interrupts(ep);
591} 591}
592 592
593static struct dw_pcie_host_ops exynos_pcie_host_ops = { 593static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
594 .rd_own_conf = exynos_pcie_rd_own_conf, 594 .rd_own_conf = exynos_pcie_rd_own_conf,
595 .wr_own_conf = exynos_pcie_wr_own_conf, 595 .wr_own_conf = exynos_pcie_wr_own_conf,
596 .host_init = exynos_pcie_host_init, 596 .host_init = exynos_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index a98cba55c7f0..bf5c3616e344 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -24,6 +24,7 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/regmap.h> 26#include <linux/regmap.h>
27#include <linux/regulator/consumer.h>
27#include <linux/resource.h> 28#include <linux/resource.h>
28#include <linux/signal.h> 29#include <linux/signal.h>
29#include <linux/types.h> 30#include <linux/types.h>
@@ -59,6 +60,7 @@ struct imx6_pcie {
59 u32 tx_swing_full; 60 u32 tx_swing_full;
60 u32 tx_swing_low; 61 u32 tx_swing_low;
61 int link_gen; 62 int link_gen;
63 struct regulator *vpcie;
62}; 64};
63 65
64/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ 66/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -252,11 +254,40 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
252static int imx6q_pcie_abort_handler(unsigned long addr, 254static int imx6q_pcie_abort_handler(unsigned long addr,
253 unsigned int fsr, struct pt_regs *regs) 255 unsigned int fsr, struct pt_regs *regs)
254{ 256{
255 return 0; 257 unsigned long pc = instruction_pointer(regs);
258 unsigned long instr = *(unsigned long *)pc;
259 int reg = (instr >> 12) & 15;
260
261 /*
262 * If the instruction being executed was a read,
263 * make it look like it read all-ones.
264 */
265 if ((instr & 0x0c100000) == 0x04100000) {
266 unsigned long val;
267
268 if (instr & 0x00400000)
269 val = 255;
270 else
271 val = -1;
272
273 regs->uregs[reg] = val;
274 regs->ARM_pc += 4;
275 return 0;
276 }
277
278 if ((instr & 0x0e100090) == 0x00100090) {
279 regs->uregs[reg] = -1;
280 regs->ARM_pc += 4;
281 return 0;
282 }
283
284 return 1;
256} 285}
257 286
258static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) 287static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
259{ 288{
289 struct device *dev = imx6_pcie->pci->dev;
290
260 switch (imx6_pcie->variant) { 291 switch (imx6_pcie->variant) {
261 case IMX7D: 292 case IMX7D:
262 reset_control_assert(imx6_pcie->pciephy_reset); 293 reset_control_assert(imx6_pcie->pciephy_reset);
@@ -283,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
283 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); 314 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
284 break; 315 break;
285 } 316 }
317
318 if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
319 int ret = regulator_disable(imx6_pcie->vpcie);
320
321 if (ret)
322 dev_err(dev, "failed to disable vpcie regulator: %d\n",
323 ret);
324 }
286} 325}
287 326
288static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) 327static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
@@ -349,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
349 struct device *dev = pci->dev; 388 struct device *dev = pci->dev;
350 int ret; 389 int ret;
351 390
391 if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
392 ret = regulator_enable(imx6_pcie->vpcie);
393 if (ret) {
394 dev_err(dev, "failed to enable vpcie regulator: %d\n",
395 ret);
396 return;
397 }
398 }
399
352 ret = clk_prepare_enable(imx6_pcie->pcie_phy); 400 ret = clk_prepare_enable(imx6_pcie->pcie_phy);
353 if (ret) { 401 if (ret) {
354 dev_err(dev, "unable to enable pcie_phy clock\n"); 402 dev_err(dev, "unable to enable pcie_phy clock\n");
355 return; 403 goto err_pcie_phy;
356 } 404 }
357 405
358 ret = clk_prepare_enable(imx6_pcie->pcie_bus); 406 ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -412,6 +460,13 @@ err_pcie:
412 clk_disable_unprepare(imx6_pcie->pcie_bus); 460 clk_disable_unprepare(imx6_pcie->pcie_bus);
413err_pcie_bus: 461err_pcie_bus:
414 clk_disable_unprepare(imx6_pcie->pcie_phy); 462 clk_disable_unprepare(imx6_pcie->pcie_phy);
463err_pcie_phy:
464 if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
465 ret = regulator_disable(imx6_pcie->vpcie);
466 if (ret)
467 dev_err(dev, "failed to disable vpcie regulator: %d\n",
468 ret);
469 }
415} 470}
416 471
417static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) 472static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
@@ -602,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci)
602 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; 657 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
603} 658}
604 659
605static struct dw_pcie_host_ops imx6_pcie_host_ops = { 660static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
606 .host_init = imx6_pcie_host_init, 661 .host_init = imx6_pcie_host_init,
607}; 662};
608 663
@@ -775,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
775 if (ret) 830 if (ret)
776 imx6_pcie->link_gen = 1; 831 imx6_pcie->link_gen = 1;
777 832
833 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
834 if (IS_ERR(imx6_pcie->vpcie)) {
835 if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
836 return -EPROBE_DEFER;
837 imx6_pcie->vpcie = NULL;
838 }
839
778 platform_set_drvdata(pdev, imx6_pcie); 840 platform_set_drvdata(pdev, imx6_pcie);
779 841
780 ret = imx6_add_pcie_port(imx6_pcie, pdev); 842 ret = imx6_add_pcie_port(imx6_pcie, pdev);
@@ -819,8 +881,8 @@ static int __init imx6_pcie_init(void)
819 * we can install the handler here without risking it 881 * we can install the handler here without risking it
820 * accessing some uninitialized driver state. 882 * accessing some uninitialized driver state.
821 */ 883 */
822 hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, 884 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
823 "imprecise external abort"); 885 "external abort on non-linefetch");
824 886
825 return platform_driver_register(&imx6_pcie_driver); 887 return platform_driver_register(&imx6_pcie_driver);
826} 888}
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index fcc9723bad6e..4783cec1f78d 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
291 "Asynchronous external abort"); 291 "Asynchronous external abort");
292} 292}
293 293
294static struct dw_pcie_host_ops keystone_pcie_host_ops = { 294static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
295 .rd_other_conf = ks_dw_pcie_rd_other_conf, 295 .rd_other_conf = ks_dw_pcie_rd_other_conf,
296 .wr_other_conf = ks_dw_pcie_wr_other_conf, 296 .wr_other_conf = ks_dw_pcie_wr_other_conf,
297 .host_init = ks_pcie_host_init, 297 .host_init = ks_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 27d638c4e134..fd861289ad8b 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -39,7 +39,7 @@ struct ls_pcie_drvdata {
39 u32 lut_offset; 39 u32 lut_offset;
40 u32 ltssm_shift; 40 u32 ltssm_shift;
41 u32 lut_dbg; 41 u32 lut_dbg;
42 struct dw_pcie_host_ops *ops; 42 const struct dw_pcie_host_ops *ops;
43 const struct dw_pcie_ops *dw_pcie_ops; 43 const struct dw_pcie_ops *dw_pcie_ops;
44}; 44};
45 45
@@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
185 return 0; 185 return 0;
186} 186}
187 187
188static struct dw_pcie_host_ops ls1021_pcie_host_ops = { 188static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
189 .host_init = ls1021_pcie_host_init, 189 .host_init = ls1021_pcie_host_init,
190 .msi_host_init = ls_pcie_msi_host_init, 190 .msi_host_init = ls_pcie_msi_host_init,
191}; 191};
192 192
193static struct dw_pcie_host_ops ls_pcie_host_ops = { 193static const struct dw_pcie_host_ops ls_pcie_host_ops = {
194 .host_init = ls_pcie_host_init, 194 .host_init = ls_pcie_host_init,
195 .msi_host_init = ls_pcie_msi_host_init, 195 .msi_host_init = ls_pcie_msi_host_init,
196}; 196};
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index 495b023042b3..ea8f34af6a85 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
160 return IRQ_HANDLED; 160 return IRQ_HANDLED;
161} 161}
162 162
163static struct dw_pcie_host_ops armada8k_pcie_host_ops = { 163static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
164 .host_init = armada8k_pcie_host_init, 164 .host_init = armada8k_pcie_host_init,
165}; 165};
166 166
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 82a04acc42fd..01c6f7823672 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp)
184 artpec6_pcie_enable_interrupts(artpec6_pcie); 184 artpec6_pcie_enable_interrupts(artpec6_pcie);
185} 185}
186 186
187static struct dw_pcie_host_ops artpec6_pcie_host_ops = { 187static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
188 .host_init = artpec6_pcie_host_init, 188 .host_init = artpec6_pcie_host_init,
189}; 189};
190 190
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 32091b32f6e1..091b4e7ad059 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
46 dw_pcie_msi_init(pp); 46 dw_pcie_msi_init(pp);
47} 47}
48 48
49static struct dw_pcie_host_ops dw_plat_pcie_host_ops = { 49static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
50 .host_init = dw_plat_pcie_host_init, 50 .host_init = dw_plat_pcie_host_init,
51}; 51};
52 52
@@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
67 67
68 ret = devm_request_irq(dev, pp->msi_irq, 68 ret = devm_request_irq(dev, pp->msi_irq,
69 dw_plat_pcie_msi_irq_handler, 69 dw_plat_pcie_msi_irq_handler,
70 IRQF_SHARED, "dw-plat-pcie-msi", pp); 70 IRQF_SHARED | IRQF_NO_THREAD,
71 "dw-plat-pcie-msi", pp);
71 if (ret) { 72 if (ret) {
72 dev_err(dev, "failed to request MSI IRQ\n"); 73 dev_err(dev, "failed to request MSI IRQ\n");
73 return ret; 74 return ret;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index c6a840575796..b4d2a89f8e58 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -162,7 +162,7 @@ struct pcie_port {
162 struct resource *mem; 162 struct resource *mem;
163 struct resource *busn; 163 struct resource *busn;
164 int irq; 164 int irq;
165 struct dw_pcie_host_ops *ops; 165 const struct dw_pcie_host_ops *ops;
166 int msi_irq; 166 int msi_irq;
167 struct irq_domain *irq_domain; 167 struct irq_domain *irq_domain;
168 unsigned long msi_data; 168 unsigned long msi_data;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
new file mode 100644
index 000000000000..33fddb9f6739
--- /dev/null
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -0,0 +1,517 @@
1/*
2 * PCIe host controller driver for Kirin Phone SoCs
3 *
4 * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
5 * http://www.huawei.com
6 *
7 * Author: Xiaowei Song <songxiaowei@huawei.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <asm/compiler.h>
15#include <linux/compiler.h>
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/gpio.h>
20#include <linux/interrupt.h>
21#include <linux/mfd/syscon.h>
22#include <linux/of_address.h>
23#include <linux/of_gpio.h>
24#include <linux/of_pci.h>
25#include <linux/pci.h>
26#include <linux/pci_regs.h>
27#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/resource.h>
30#include <linux/types.h>
31#include "pcie-designware.h"
32
33#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
34
35#define REF_CLK_FREQ 100000000
36
37/* PCIe ELBI registers */
38#define SOC_PCIECTRL_CTRL0_ADDR 0x000
39#define SOC_PCIECTRL_CTRL1_ADDR 0x004
40#define SOC_PCIEPHY_CTRL2_ADDR 0x008
41#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
42#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
43
44/* info located in APB */
45#define PCIE_APP_LTSSM_ENABLE 0x01c
46#define PCIE_APB_PHY_CTRL0 0x0
47#define PCIE_APB_PHY_CTRL1 0x4
48#define PCIE_APB_PHY_STATUS0 0x400
49#define PCIE_LINKUP_ENABLE (0x8020)
50#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
51#define PIPE_CLK_STABLE (0x1 << 19)
52#define PHY_REF_PAD_BIT (0x1 << 8)
53#define PHY_PWR_DOWN_BIT (0x1 << 22)
54#define PHY_RST_ACK_BIT (0x1 << 16)
55
56/* info located in sysctrl */
57#define SCTRL_PCIE_CMOS_OFFSET 0x60
58#define SCTRL_PCIE_CMOS_BIT 0x10
59#define SCTRL_PCIE_ISO_OFFSET 0x44
60#define SCTRL_PCIE_ISO_BIT 0x30
61#define SCTRL_PCIE_HPCLK_OFFSET 0x190
62#define SCTRL_PCIE_HPCLK_BIT 0x184000
63#define SCTRL_PCIE_OE_OFFSET 0x14a
64#define PCIE_DEBOUNCE_PARAM 0xF0F400
65#define PCIE_OE_BYPASS (0x3 << 28)
66
67/* peri_crg ctrl */
68#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
69#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
70
71/* Time for delay */
72#define REF_2_PERST_MIN 20000
73#define REF_2_PERST_MAX 25000
74#define PERST_2_ACCESS_MIN 10000
75#define PERST_2_ACCESS_MAX 12000
76#define LINK_WAIT_MIN 900
77#define LINK_WAIT_MAX 1000
78#define PIPE_CLK_WAIT_MIN 550
79#define PIPE_CLK_WAIT_MAX 600
80#define TIME_CMOS_MIN 100
81#define TIME_CMOS_MAX 105
82#define TIME_PHY_PD_MIN 10
83#define TIME_PHY_PD_MAX 11
84
85struct kirin_pcie {
86 struct dw_pcie *pci;
87 void __iomem *apb_base;
88 void __iomem *phy_base;
89 struct regmap *crgctrl;
90 struct regmap *sysctrl;
91 struct clk *apb_sys_clk;
92 struct clk *apb_phy_clk;
93 struct clk *phy_ref_clk;
94 struct clk *pcie_aclk;
95 struct clk *pcie_aux_clk;
96 int gpio_id_reset;
97};
98
99/* Registers in PCIeCTRL */
100static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
101 u32 val, u32 reg)
102{
103 writel(val, kirin_pcie->apb_base + reg);
104}
105
106static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
107{
108 return readl(kirin_pcie->apb_base + reg);
109}
110
111/* Registers in PCIePHY */
112static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
113 u32 val, u32 reg)
114{
115 writel(val, kirin_pcie->phy_base + reg);
116}
117
118static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
119{
120 return readl(kirin_pcie->phy_base + reg);
121}
122
123static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
124 struct platform_device *pdev)
125{
126 struct device *dev = &pdev->dev;
127
128 kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
129 if (IS_ERR(kirin_pcie->phy_ref_clk))
130 return PTR_ERR(kirin_pcie->phy_ref_clk);
131
132 kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
133 if (IS_ERR(kirin_pcie->pcie_aux_clk))
134 return PTR_ERR(kirin_pcie->pcie_aux_clk);
135
136 kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
137 if (IS_ERR(kirin_pcie->apb_phy_clk))
138 return PTR_ERR(kirin_pcie->apb_phy_clk);
139
140 kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
141 if (IS_ERR(kirin_pcie->apb_sys_clk))
142 return PTR_ERR(kirin_pcie->apb_sys_clk);
143
144 kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
145 if (IS_ERR(kirin_pcie->pcie_aclk))
146 return PTR_ERR(kirin_pcie->pcie_aclk);
147
148 return 0;
149}
150
151static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
152 struct platform_device *pdev)
153{
154 struct device *dev = &pdev->dev;
155 struct resource *apb;
156 struct resource *phy;
157 struct resource *dbi;
158
159 apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
160 kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
161 if (IS_ERR(kirin_pcie->apb_base))
162 return PTR_ERR(kirin_pcie->apb_base);
163
164 phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
165 kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
166 if (IS_ERR(kirin_pcie->phy_base))
167 return PTR_ERR(kirin_pcie->phy_base);
168
169 dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
170 kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
171 if (IS_ERR(kirin_pcie->pci->dbi_base))
172 return PTR_ERR(kirin_pcie->pci->dbi_base);
173
174 kirin_pcie->crgctrl =
175 syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
176 if (IS_ERR(kirin_pcie->crgctrl))
177 return PTR_ERR(kirin_pcie->crgctrl);
178
179 kirin_pcie->sysctrl =
180 syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
181 if (IS_ERR(kirin_pcie->sysctrl))
182 return PTR_ERR(kirin_pcie->sysctrl);
183
184 return 0;
185}
186
187static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
188{
189 struct device *dev = kirin_pcie->pci->dev;
190 u32 reg_val;
191
192 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
193 reg_val &= ~PHY_REF_PAD_BIT;
194 kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
195
196 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
197 reg_val &= ~PHY_PWR_DOWN_BIT;
198 kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
199 usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
200
201 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
202 reg_val &= ~PHY_RST_ACK_BIT;
203 kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
204
205 usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
206 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
207 if (reg_val & PIPE_CLK_STABLE) {
208 dev_err(dev, "PIPE clk is not stable\n");
209 return -EINVAL;
210 }
211
212 return 0;
213}
214
215static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
216{
217 u32 val;
218
219 regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
220 val |= PCIE_DEBOUNCE_PARAM;
221 val &= ~PCIE_OE_BYPASS;
222 regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
223}
224
/*
 * Enable (enable == true) or disable (enable == false) the five PCIe
 * clocks in bring-up order.
 *
 * The label chain below does double duty: a call with enable == false
 * jumps straight to close_clk and walks the entire chain, releasing
 * every clock in reverse order; a failure while enabling jumps into the
 * middle of the same chain so that only the clocks already enabled are
 * released.
 *
 * Returns 0 on success or a negative errno from the clk API.
 */
static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
{
	int ret = 0;

	if (!enable)
		goto close_clk;

	/* The PHY reference clock must run at REF_CLK_FREQ */
	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
	if (ret)
		goto apb_sys_fail;

	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
	if (ret)
		goto apb_phy_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
	if (ret)
		goto aclk_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
	if (ret)
		goto aux_clk_fail;

	return 0;

close_clk:
	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
aux_clk_fail:
	clk_disable_unprepare(kirin_pcie->pcie_aclk);
aclk_fail:
	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
apb_phy_fail:
	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
apb_sys_fail:
	clk_disable_unprepare(kirin_pcie->phy_ref_clk);

	return ret;
}
271
272static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
273{
274 int ret;
275
276 /* Power supply for Host */
277 regmap_write(kirin_pcie->sysctrl,
278 SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
279 usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
280 kirin_pcie_oe_enable(kirin_pcie);
281
282 ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
283 if (ret)
284 return ret;
285
286 /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
287 regmap_write(kirin_pcie->sysctrl,
288 SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
289 regmap_write(kirin_pcie->crgctrl,
290 CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
291 regmap_write(kirin_pcie->sysctrl,
292 SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
293
294 ret = kirin_pcie_phy_init(kirin_pcie);
295 if (ret)
296 goto close_clk;
297
298 /* perst assert Endpoint */
299 if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
300 usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
301 ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
302 if (ret)
303 goto close_clk;
304 usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
305
306 return 0;
307 }
308
309close_clk:
310 kirin_pcie_clk_ctrl(kirin_pcie, false);
311 return ret;
312}
313
314static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
315 bool on)
316{
317 u32 val;
318
319 val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
320 if (on)
321 val = val | PCIE_ELBI_SLV_DBI_ENABLE;
322 else
323 val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
324
325 kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
326}
327
328static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
329 bool on)
330{
331 u32 val;
332
333 val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
334 if (on)
335 val = val | PCIE_ELBI_SLV_DBI_ENABLE;
336 else
337 val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
338
339 kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
340}
341
342static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
343 int where, int size, u32 *val)
344{
345 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
346 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
347 int ret;
348
349 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
350 ret = dw_pcie_read(pci->dbi_base + where, size, val);
351 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
352
353 return ret;
354}
355
356static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
357 int where, int size, u32 val)
358{
359 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
360 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
361 int ret;
362
363 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
364 ret = dw_pcie_write(pci->dbi_base + where, size, val);
365 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
366
367 return ret;
368}
369
370static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
371 u32 reg, size_t size)
372{
373 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
374 u32 ret;
375
376 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
377 dw_pcie_read(base + reg, size, &ret);
378 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
379
380 return ret;
381}
382
383static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
384 u32 reg, size_t size, u32 val)
385{
386 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
387
388 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
389 dw_pcie_write(base + reg, size, val);
390 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
391}
392
393static int kirin_pcie_link_up(struct dw_pcie *pci)
394{
395 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
396 u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
397
398 if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
399 return 1;
400
401 return 0;
402}
403
404static int kirin_pcie_establish_link(struct pcie_port *pp)
405{
406 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
407 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
408 struct device *dev = kirin_pcie->pci->dev;
409 int count = 0;
410
411 if (kirin_pcie_link_up(pci))
412 return 0;
413
414 dw_pcie_setup_rc(pp);
415
416 /* assert LTSSM enable */
417 kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
418 PCIE_APP_LTSSM_ENABLE);
419
420 /* check if the link is up or not */
421 while (!kirin_pcie_link_up(pci)) {
422 usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
423 count++;
424 if (count == 1000) {
425 dev_err(dev, "Link Fail\n");
426 return -EINVAL;
427 }
428 }
429
430 return 0;
431}
432
/*
 * dw_pcie host_init callback: bring up the PCIe link.  The error code
 * from kirin_pcie_establish_link() is necessarily discarded because this
 * callback's signature returns void.
 */
static void kirin_pcie_host_init(struct pcie_port *pp)
{
	kirin_pcie_establish_link(pp);
}
437
438static struct dw_pcie_ops kirin_dw_pcie_ops = {
439 .read_dbi = kirin_pcie_read_dbi,
440 .write_dbi = kirin_pcie_write_dbi,
441 .link_up = kirin_pcie_link_up,
442};
443
444static struct dw_pcie_host_ops kirin_pcie_host_ops = {
445 .rd_own_conf = kirin_pcie_rd_own_conf,
446 .wr_own_conf = kirin_pcie_wr_own_conf,
447 .host_init = kirin_pcie_host_init,
448};
449
450static int __init kirin_add_pcie_port(struct dw_pcie *pci,
451 struct platform_device *pdev)
452{
453 pci->pp.ops = &kirin_pcie_host_ops;
454
455 return dw_pcie_host_init(&pci->pp);
456}
457
/*
 * Probe: allocate driver state, look up clocks/registers/GPIO from the
 * device tree, power the controller on, then register the host bridge.
 * All allocations are devm-managed; on any failure the already-acquired
 * devm resources are released by the driver core.
 */
static int kirin_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct kirin_pcie *kirin_pcie;
	struct dw_pcie *pci;
	int ret;

	/* This driver is DT-only; no OF node means a broken instantiation */
	if (!dev->of_node) {
		dev_err(dev, "NULL node\n");
		return -EINVAL;
	}

	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
	if (!kirin_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &kirin_dw_pcie_ops;
	kirin_pcie->pci = pci;

	/* Collect clocks and register regions before powering anything on */
	ret = kirin_pcie_get_clk(kirin_pcie, pdev);
	if (ret)
		return ret;

	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
	if (ret)
		return ret;

	/* PERST# line toward the endpoint; requested later in power_on */
	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
						      "reset-gpio", 0);
	if (kirin_pcie->gpio_id_reset < 0)
		return -ENODEV;

	ret = kirin_pcie_power_on(kirin_pcie);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, kirin_pcie);

	return kirin_add_pcie_port(pci, pdev);
}
503
/* DT match table: only the HiSilicon Kirin 960 SoC is supported */
static const struct of_device_id kirin_pcie_match[] = {
	{ .compatible = "hisilicon,kirin960-pcie" },
	{},
};
508
509struct platform_driver kirin_pcie_driver = {
510 .probe = kirin_pcie_probe,
511 .driver = {
512 .name = "kirin-pcie",
513 .of_match_table = kirin_pcie_match,
514 .suppress_bind_attrs = true,
515 },
516};
517builtin_platform_driver(kirin_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5bf23d432fdb..68c5f2ab5bc8 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -51,6 +51,12 @@
51#define PCIE20_ELBI_SYS_CTRL 0x04 51#define PCIE20_ELBI_SYS_CTRL 0x04
52#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) 52#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
53 53
54#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
55#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
56#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
57#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
58#define CFG_BRIDGE_SB_INIT BIT(0)
59
54#define PCIE20_CAP 0x70 60#define PCIE20_CAP 0x70
55 61
56#define PERST_DELAY_US 1000 62#define PERST_DELAY_US 1000
@@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 {
86 struct clk *pipe_clk; 92 struct clk *pipe_clk;
87}; 93};
88 94
95struct qcom_pcie_resources_v3 {
96 struct clk *aux_clk;
97 struct clk *master_clk;
98 struct clk *slave_clk;
99 struct reset_control *axi_m_reset;
100 struct reset_control *axi_s_reset;
101 struct reset_control *pipe_reset;
102 struct reset_control *axi_m_vmid_reset;
103 struct reset_control *axi_s_xpu_reset;
104 struct reset_control *parf_reset;
105 struct reset_control *phy_reset;
106 struct reset_control *axi_m_sticky_reset;
107 struct reset_control *pipe_sticky_reset;
108 struct reset_control *pwr_reset;
109 struct reset_control *ahb_reset;
110 struct reset_control *phy_ahb_reset;
111};
112
89union qcom_pcie_resources { 113union qcom_pcie_resources {
90 struct qcom_pcie_resources_v0 v0; 114 struct qcom_pcie_resources_v0 v0;
91 struct qcom_pcie_resources_v1 v1; 115 struct qcom_pcie_resources_v1 v1;
92 struct qcom_pcie_resources_v2 v2; 116 struct qcom_pcie_resources_v2 v2;
117 struct qcom_pcie_resources_v3 v3;
93}; 118};
94 119
95struct qcom_pcie; 120struct qcom_pcie;
@@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
133 return dw_handle_msi_irq(pp); 158 return dw_handle_msi_irq(pp);
134} 159}
135 160
136static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
137{
138 u32 val;
139
140 /* enable link training */
141 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
142 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
143 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
144}
145
146static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
147{
148 u32 val;
149
150 /* enable link training */
151 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
152 val |= BIT(8);
153 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
154}
155
156static int qcom_pcie_establish_link(struct qcom_pcie *pcie) 161static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
157{ 162{
158 struct dw_pcie *pci = pcie->pci; 163 struct dw_pcie *pci = pcie->pci;
@@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
167 return dw_pcie_wait_for_link(pci); 172 return dw_pcie_wait_for_link(pci);
168} 173}
169 174
175static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
176{
177 u32 val;
178
179 /* enable link training */
180 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
181 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
182 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
183}
184
170static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) 185static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
171{ 186{
172 struct qcom_pcie_resources_v0 *res = &pcie->res.v0; 187 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
217 return PTR_ERR_OR_ZERO(res->phy_reset); 232 return PTR_ERR_OR_ZERO(res->phy_reset);
218} 233}
219 234
220static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
221{
222 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
223 struct dw_pcie *pci = pcie->pci;
224 struct device *dev = pci->dev;
225
226 res->vdda = devm_regulator_get(dev, "vdda");
227 if (IS_ERR(res->vdda))
228 return PTR_ERR(res->vdda);
229
230 res->iface = devm_clk_get(dev, "iface");
231 if (IS_ERR(res->iface))
232 return PTR_ERR(res->iface);
233
234 res->aux = devm_clk_get(dev, "aux");
235 if (IS_ERR(res->aux))
236 return PTR_ERR(res->aux);
237
238 res->master_bus = devm_clk_get(dev, "master_bus");
239 if (IS_ERR(res->master_bus))
240 return PTR_ERR(res->master_bus);
241
242 res->slave_bus = devm_clk_get(dev, "slave_bus");
243 if (IS_ERR(res->slave_bus))
244 return PTR_ERR(res->slave_bus);
245
246 res->core = devm_reset_control_get(dev, "core");
247 return PTR_ERR_OR_ZERO(res->core);
248}
249
250static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) 235static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
251{ 236{
252 struct qcom_pcie_resources_v0 *res = &pcie->res.v0; 237 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
357 /* wait for clock acquisition */ 342 /* wait for clock acquisition */
358 usleep_range(1000, 1500); 343 usleep_range(1000, 1500);
359 344
345
346 /* Set the Max TLP size to 2K, instead of using default of 4K */
347 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
348 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
349 writel(CFG_BRIDGE_SB_INIT,
350 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
351
360 return 0; 352 return 0;
361 353
362err_deassert_ahb: 354err_deassert_ahb:
@@ -375,6 +367,36 @@ err_refclk:
375 return ret; 367 return ret;
376} 368}
377 369
370static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
371{
372 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
373 struct dw_pcie *pci = pcie->pci;
374 struct device *dev = pci->dev;
375
376 res->vdda = devm_regulator_get(dev, "vdda");
377 if (IS_ERR(res->vdda))
378 return PTR_ERR(res->vdda);
379
380 res->iface = devm_clk_get(dev, "iface");
381 if (IS_ERR(res->iface))
382 return PTR_ERR(res->iface);
383
384 res->aux = devm_clk_get(dev, "aux");
385 if (IS_ERR(res->aux))
386 return PTR_ERR(res->aux);
387
388 res->master_bus = devm_clk_get(dev, "master_bus");
389 if (IS_ERR(res->master_bus))
390 return PTR_ERR(res->master_bus);
391
392 res->slave_bus = devm_clk_get(dev, "slave_bus");
393 if (IS_ERR(res->slave_bus))
394 return PTR_ERR(res->slave_bus);
395
396 res->core = devm_reset_control_get(dev, "core");
397 return PTR_ERR_OR_ZERO(res->core);
398}
399
378static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) 400static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
379{ 401{
380 struct qcom_pcie_resources_v1 *res = &pcie->res.v1; 402 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
@@ -455,6 +477,16 @@ err_res:
455 return ret; 477 return ret;
456} 478}
457 479
480static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
481{
482 u32 val;
483
484 /* enable link training */
485 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
486 val |= BIT(8);
487 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
488}
489
458static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) 490static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
459{ 491{
460 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 492 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
481 return PTR_ERR_OR_ZERO(res->pipe_clk); 513 return PTR_ERR_OR_ZERO(res->pipe_clk);
482} 514}
483 515
516static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
517{
518 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
519
520 clk_disable_unprepare(res->pipe_clk);
521 clk_disable_unprepare(res->slave_clk);
522 clk_disable_unprepare(res->master_clk);
523 clk_disable_unprepare(res->cfg_clk);
524 clk_disable_unprepare(res->aux_clk);
525}
526
484static int qcom_pcie_init_v2(struct qcom_pcie *pcie) 527static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
485{ 528{
486 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 529 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
562 return 0; 605 return 0;
563} 606}
564 607
565static int qcom_pcie_link_up(struct dw_pcie *pci) 608static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
566{ 609{
567 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); 610 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
611 struct dw_pcie *pci = pcie->pci;
612 struct device *dev = pci->dev;
568 613
569 return !!(val & PCI_EXP_LNKSTA_DLLLA); 614 res->aux_clk = devm_clk_get(dev, "aux");
615 if (IS_ERR(res->aux_clk))
616 return PTR_ERR(res->aux_clk);
617
618 res->master_clk = devm_clk_get(dev, "master_bus");
619 if (IS_ERR(res->master_clk))
620 return PTR_ERR(res->master_clk);
621
622 res->slave_clk = devm_clk_get(dev, "slave_bus");
623 if (IS_ERR(res->slave_clk))
624 return PTR_ERR(res->slave_clk);
625
626 res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
627 if (IS_ERR(res->axi_m_reset))
628 return PTR_ERR(res->axi_m_reset);
629
630 res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
631 if (IS_ERR(res->axi_s_reset))
632 return PTR_ERR(res->axi_s_reset);
633
634 res->pipe_reset = devm_reset_control_get(dev, "pipe");
635 if (IS_ERR(res->pipe_reset))
636 return PTR_ERR(res->pipe_reset);
637
638 res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
639 if (IS_ERR(res->axi_m_vmid_reset))
640 return PTR_ERR(res->axi_m_vmid_reset);
641
642 res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
643 if (IS_ERR(res->axi_s_xpu_reset))
644 return PTR_ERR(res->axi_s_xpu_reset);
645
646 res->parf_reset = devm_reset_control_get(dev, "parf");
647 if (IS_ERR(res->parf_reset))
648 return PTR_ERR(res->parf_reset);
649
650 res->phy_reset = devm_reset_control_get(dev, "phy");
651 if (IS_ERR(res->phy_reset))
652 return PTR_ERR(res->phy_reset);
653
654 res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
655 if (IS_ERR(res->axi_m_sticky_reset))
656 return PTR_ERR(res->axi_m_sticky_reset);
657
658 res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
659 if (IS_ERR(res->pipe_sticky_reset))
660 return PTR_ERR(res->pipe_sticky_reset);
661
662 res->pwr_reset = devm_reset_control_get(dev, "pwr");
663 if (IS_ERR(res->pwr_reset))
664 return PTR_ERR(res->pwr_reset);
665
666 res->ahb_reset = devm_reset_control_get(dev, "ahb");
667 if (IS_ERR(res->ahb_reset))
668 return PTR_ERR(res->ahb_reset);
669
670 res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
671 if (IS_ERR(res->phy_ahb_reset))
672 return PTR_ERR(res->phy_ahb_reset);
673
674 return 0;
570} 675}
571 676
572static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) 677static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
573{ 678{
574 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 679 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
575 680
576 clk_disable_unprepare(res->pipe_clk); 681 reset_control_assert(res->axi_m_reset);
682 reset_control_assert(res->axi_s_reset);
683 reset_control_assert(res->pipe_reset);
684 reset_control_assert(res->pipe_sticky_reset);
685 reset_control_assert(res->phy_reset);
686 reset_control_assert(res->phy_ahb_reset);
687 reset_control_assert(res->axi_m_sticky_reset);
688 reset_control_assert(res->pwr_reset);
689 reset_control_assert(res->ahb_reset);
690 clk_disable_unprepare(res->aux_clk);
691 clk_disable_unprepare(res->master_clk);
577 clk_disable_unprepare(res->slave_clk); 692 clk_disable_unprepare(res->slave_clk);
693}
694
695static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
696{
697 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
698 struct dw_pcie *pci = pcie->pci;
699 struct device *dev = pci->dev;
700 u32 val;
701 int ret;
702
703 ret = reset_control_assert(res->axi_m_reset);
704 if (ret) {
705 dev_err(dev, "cannot assert axi master reset\n");
706 return ret;
707 }
708
709 ret = reset_control_assert(res->axi_s_reset);
710 if (ret) {
711 dev_err(dev, "cannot assert axi slave reset\n");
712 return ret;
713 }
714
715 usleep_range(10000, 12000);
716
717 ret = reset_control_assert(res->pipe_reset);
718 if (ret) {
719 dev_err(dev, "cannot assert pipe reset\n");
720 return ret;
721 }
722
723 ret = reset_control_assert(res->pipe_sticky_reset);
724 if (ret) {
725 dev_err(dev, "cannot assert pipe sticky reset\n");
726 return ret;
727 }
728
729 ret = reset_control_assert(res->phy_reset);
730 if (ret) {
731 dev_err(dev, "cannot assert phy reset\n");
732 return ret;
733 }
734
735 ret = reset_control_assert(res->phy_ahb_reset);
736 if (ret) {
737 dev_err(dev, "cannot assert phy ahb reset\n");
738 return ret;
739 }
740
741 usleep_range(10000, 12000);
742
743 ret = reset_control_assert(res->axi_m_sticky_reset);
744 if (ret) {
745 dev_err(dev, "cannot assert axi master sticky reset\n");
746 return ret;
747 }
748
749 ret = reset_control_assert(res->pwr_reset);
750 if (ret) {
751 dev_err(dev, "cannot assert power reset\n");
752 return ret;
753 }
754
755 ret = reset_control_assert(res->ahb_reset);
756 if (ret) {
757 dev_err(dev, "cannot assert ahb reset\n");
758 return ret;
759 }
760
761 usleep_range(10000, 12000);
762
763 ret = reset_control_deassert(res->phy_ahb_reset);
764 if (ret) {
765 dev_err(dev, "cannot deassert phy ahb reset\n");
766 return ret;
767 }
768
769 ret = reset_control_deassert(res->phy_reset);
770 if (ret) {
771 dev_err(dev, "cannot deassert phy reset\n");
772 goto err_rst_phy;
773 }
774
775 ret = reset_control_deassert(res->pipe_reset);
776 if (ret) {
777 dev_err(dev, "cannot deassert pipe reset\n");
778 goto err_rst_pipe;
779 }
780
781 ret = reset_control_deassert(res->pipe_sticky_reset);
782 if (ret) {
783 dev_err(dev, "cannot deassert pipe sticky reset\n");
784 goto err_rst_pipe_sticky;
785 }
786
787 usleep_range(10000, 12000);
788
789 ret = reset_control_deassert(res->axi_m_reset);
790 if (ret) {
791 dev_err(dev, "cannot deassert axi master reset\n");
792 goto err_rst_axi_m;
793 }
794
795 ret = reset_control_deassert(res->axi_m_sticky_reset);
796 if (ret) {
797 dev_err(dev, "cannot deassert axi master sticky reset\n");
798 goto err_rst_axi_m_sticky;
799 }
800
801 ret = reset_control_deassert(res->axi_s_reset);
802 if (ret) {
803 dev_err(dev, "cannot deassert axi slave reset\n");
804 goto err_rst_axi_s;
805 }
806
807 ret = reset_control_deassert(res->pwr_reset);
808 if (ret) {
809 dev_err(dev, "cannot deassert power reset\n");
810 goto err_rst_pwr;
811 }
812
813 ret = reset_control_deassert(res->ahb_reset);
814 if (ret) {
815 dev_err(dev, "cannot deassert ahb reset\n");
816 goto err_rst_ahb;
817 }
818
819 usleep_range(10000, 12000);
820
821 ret = clk_prepare_enable(res->aux_clk);
822 if (ret) {
823 dev_err(dev, "cannot prepare/enable iface clock\n");
824 goto err_clk_aux;
825 }
826
827 ret = clk_prepare_enable(res->master_clk);
828 if (ret) {
829 dev_err(dev, "cannot prepare/enable core clock\n");
830 goto err_clk_axi_m;
831 }
832
833 ret = clk_prepare_enable(res->slave_clk);
834 if (ret) {
835 dev_err(dev, "cannot prepare/enable phy clock\n");
836 goto err_clk_axi_s;
837 }
838
839 /* enable PCIe clocks and resets */
840 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
841 val &= !BIT(0);
842 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
843
844 /* change DBI base address */
845 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
846
847 /* MAC PHY_POWERDOWN MUX DISABLE */
848 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
849 val &= ~BIT(29);
850 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
851
852 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
853 val |= BIT(4);
854 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
855
856 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
857 val |= BIT(31);
858 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
859
860 return 0;
861
862err_clk_axi_s:
578 clk_disable_unprepare(res->master_clk); 863 clk_disable_unprepare(res->master_clk);
579 clk_disable_unprepare(res->cfg_clk); 864err_clk_axi_m:
580 clk_disable_unprepare(res->aux_clk); 865 clk_disable_unprepare(res->aux_clk);
866err_clk_aux:
867 reset_control_assert(res->ahb_reset);
868err_rst_ahb:
869 reset_control_assert(res->pwr_reset);
870err_rst_pwr:
871 reset_control_assert(res->axi_s_reset);
872err_rst_axi_s:
873 reset_control_assert(res->axi_m_sticky_reset);
874err_rst_axi_m_sticky:
875 reset_control_assert(res->axi_m_reset);
876err_rst_axi_m:
877 reset_control_assert(res->pipe_sticky_reset);
878err_rst_pipe_sticky:
879 reset_control_assert(res->pipe_reset);
880err_rst_pipe:
881 reset_control_assert(res->phy_reset);
882err_rst_phy:
883 reset_control_assert(res->phy_ahb_reset);
884 return ret;
885}
886
887static int qcom_pcie_link_up(struct dw_pcie *pci)
888{
889 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
890
891 return !!(val & PCI_EXP_LNKSTA_DLLLA);
581} 892}
582 893
583static void qcom_pcie_host_init(struct pcie_port *pp) 894static void qcom_pcie_host_init(struct pcie_port *pp)
@@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
634 return dw_pcie_read(pci->dbi_base + where, size, val); 945 return dw_pcie_read(pci->dbi_base + where, size, val);
635} 946}
636 947
637static struct dw_pcie_host_ops qcom_pcie_dw_ops = { 948static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
638 .host_init = qcom_pcie_host_init, 949 .host_init = qcom_pcie_host_init,
639 .rd_own_conf = qcom_pcie_rd_own_conf, 950 .rd_own_conf = qcom_pcie_rd_own_conf,
640}; 951};
@@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = {
665 .link_up = qcom_pcie_link_up, 976 .link_up = qcom_pcie_link_up,
666}; 977};
667 978
979static const struct qcom_pcie_ops ops_v3 = {
980 .get_resources = qcom_pcie_get_resources_v3,
981 .init = qcom_pcie_init_v3,
982 .deinit = qcom_pcie_deinit_v3,
983 .ltssm_enable = qcom_pcie_v2_ltssm_enable,
984};
985
668static int qcom_pcie_probe(struct platform_device *pdev) 986static int qcom_pcie_probe(struct platform_device *pdev)
669{ 987{
670 struct device *dev = &pdev->dev; 988 struct device *dev = &pdev->dev;
@@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
727 1045
728 ret = devm_request_irq(dev, pp->msi_irq, 1046 ret = devm_request_irq(dev, pp->msi_irq,
729 qcom_pcie_msi_irq_handler, 1047 qcom_pcie_msi_irq_handler,
730 IRQF_SHARED, "qcom-pcie-msi", pp); 1048 IRQF_SHARED | IRQF_NO_THREAD,
1049 "qcom-pcie-msi", pp);
731 if (ret) { 1050 if (ret) {
732 dev_err(dev, "cannot request msi irq\n"); 1051 dev_err(dev, "cannot request msi irq\n");
733 return ret; 1052 return ret;
@@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = {
754 { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, 1073 { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
755 { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, 1074 { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
756 { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, 1075 { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
1076 { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
757 { } 1077 { }
758}; 1078};
759 1079
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index 8ff36b3dbbdf..80897291e0fb 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp)
186 spear13xx_pcie_enable_interrupts(spear13xx_pcie); 186 spear13xx_pcie_enable_interrupts(spear13xx_pcie);
187} 187}
188 188
189static struct dw_pcie_host_ops spear13xx_pcie_host_ops = { 189static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
190 .host_init = spear13xx_pcie_host_init, 190 .host_init = spear13xx_pcie_host_init,
191}; 191};
192 192
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index c23f146fb5a6..c09623ca8c3b 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -6,6 +6,7 @@ menu "PCI Endpoint"
6 6
7config PCI_ENDPOINT 7config PCI_ENDPOINT
8 bool "PCI Endpoint Support" 8 bool "PCI Endpoint Support"
9 depends on HAS_DMA
9 help 10 help
10 Enable this configuration option to support configurable PCI 11 Enable this configuration option to support configurable PCI
11 endpoint. This should be enabled if the platform has a PCI 12 endpoint. This should be enabled if the platform has a PCI
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 7f47cd5e10a5..0cd5b30dccb1 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -180,6 +180,17 @@ config PCIE_ROCKCHIP
180 There is 1 internal PCIe port available to support GEN2 with 180 There is 1 internal PCIe port available to support GEN2 with
181 4 slots. 181 4 slots.
182 182
183config PCIE_MEDIATEK
184 bool "MediaTek PCIe controller"
185 depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST)
186 depends on OF
187 depends on PCI
188 select PCIEPORTBUS
189 help
190 Say Y here if you want to enable PCIe controller support on
191 MT7623 series SoCs. There is one single root complex with 3 root
192 ports available. Each port supports Gen2 lane x1.
193
183config VMD 194config VMD
184 depends on PCI_MSI && X86_64 && SRCU 195 depends on PCI_MSI && X86_64 && SRCU
185 tristate "Intel Volume Management Device Driver" 196 tristate "Intel Volume Management Device Driver"
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index cab879578003..b10d104c85fd 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
18obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o 18obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
19obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o 19obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
20obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o 20obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
21obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
21obj-$(CONFIG_VMD) += vmd.o 22obj-$(CONFIG_VMD) += vmd.o
22 23
23# The following drivers are for devices that use the generic ACPI 24# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index e938eebcb180..5162dffc102b 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -25,6 +25,7 @@
25#include <linux/irqchip/chained_irq.h> 25#include <linux/irqchip/chained_irq.h>
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/clk.h>
28 29
29/* 30/*
30 * Special configuration registers directly in the first few words 31 * Special configuration registers directly in the first few words
@@ -37,6 +38,7 @@
37#define PCI_CONFIG 0x28 /* PCI configuration command register */ 38#define PCI_CONFIG 0x28 /* PCI configuration command register */
38#define PCI_DATA 0x2C 39#define PCI_DATA 0x2C
39 40
41#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */
40#define FARADAY_PCI_PMC 0x40 /* Power management control */ 42#define FARADAY_PCI_PMC 0x40 /* Power management control */
41#define FARADAY_PCI_PMCSR 0x44 /* Power management status */ 43#define FARADAY_PCI_PMCSR 0x44 /* Power management status */
42#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ 44#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */
@@ -45,6 +47,8 @@
45#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ 47#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */
46#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ 48#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */
47 49
50#define PCI_STATUS_66MHZ_CAPABLE BIT(21)
51
48/* Bits 31..28 gives INTD..INTA status */ 52/* Bits 31..28 gives INTD..INTA status */
49#define PCI_CTRL2_INTSTS_SHIFT 28 53#define PCI_CTRL2_INTSTS_SHIFT 28
50#define PCI_CTRL2_INTMASK_CMDERR BIT(27) 54#define PCI_CTRL2_INTMASK_CMDERR BIT(27)
@@ -117,6 +121,7 @@ struct faraday_pci {
117 void __iomem *base; 121 void __iomem *base;
118 struct irq_domain *irqdomain; 122 struct irq_domain *irqdomain;
119 struct pci_bus *bus; 123 struct pci_bus *bus;
124 struct clk *bus_clk;
120}; 125};
121 126
122static int faraday_res_to_memcfg(resource_size_t mem_base, 127static int faraday_res_to_memcfg(resource_size_t mem_base,
@@ -444,6 +449,9 @@ static int faraday_pci_probe(struct platform_device *pdev)
444 struct resource *mem; 449 struct resource *mem;
445 struct resource *io; 450 struct resource *io;
446 struct pci_host_bridge *host; 451 struct pci_host_bridge *host;
452 struct clk *clk;
453 unsigned char max_bus_speed = PCI_SPEED_33MHz;
454 unsigned char cur_bus_speed = PCI_SPEED_33MHz;
447 int ret; 455 int ret;
448 u32 val; 456 u32 val;
449 LIST_HEAD(res); 457 LIST_HEAD(res);
@@ -462,6 +470,24 @@ static int faraday_pci_probe(struct platform_device *pdev)
462 host->sysdata = p; 470 host->sysdata = p;
463 p->dev = dev; 471 p->dev = dev;
464 472
473 /* Retrieve and enable optional clocks */
474 clk = devm_clk_get(dev, "PCLK");
475 if (IS_ERR(clk))
476 return PTR_ERR(clk);
477 ret = clk_prepare_enable(clk);
478 if (ret) {
479 dev_err(dev, "could not prepare PCLK\n");
480 return ret;
481 }
482 p->bus_clk = devm_clk_get(dev, "PCICLK");
483 if (IS_ERR(p->bus_clk))
484 return PTR_ERR(clk);
485 ret = clk_prepare_enable(p->bus_clk);
486 if (ret) {
487 dev_err(dev, "could not prepare PCICLK\n");
488 return ret;
489 }
490
465 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 491 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
466 p->base = devm_ioremap_resource(dev, regs); 492 p->base = devm_ioremap_resource(dev, regs);
467 if (IS_ERR(p->base)) 493 if (IS_ERR(p->base))
@@ -524,6 +550,34 @@ static int faraday_pci_probe(struct platform_device *pdev)
524 } 550 }
525 } 551 }
526 552
553 /* Check bus clock if we can gear up to 66 MHz */
554 if (!IS_ERR(p->bus_clk)) {
555 unsigned long rate;
556 u32 val;
557
558 faraday_raw_pci_read_config(p, 0, 0,
559 FARADAY_PCI_STATUS_CMD, 4, &val);
560 rate = clk_get_rate(p->bus_clk);
561
562 if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) {
563 dev_info(dev, "33MHz bus is 66MHz capable\n");
564 max_bus_speed = PCI_SPEED_66MHz;
565 ret = clk_set_rate(p->bus_clk, 66000000);
566 if (ret)
567 dev_err(dev, "failed to set bus clock\n");
568 } else {
569 dev_info(dev, "33MHz only bus\n");
570 max_bus_speed = PCI_SPEED_33MHz;
571 }
572
573 /* Bumping the clock may fail so read back the rate */
574 rate = clk_get_rate(p->bus_clk);
575 if (rate == 33000000)
576 cur_bus_speed = PCI_SPEED_33MHz;
577 if (rate == 66000000)
578 cur_bus_speed = PCI_SPEED_66MHz;
579 }
580
527 ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); 581 ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
528 if (ret) 582 if (ret)
529 return ret; 583 return ret;
@@ -535,6 +589,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
535 return ret; 589 return ret;
536 } 590 }
537 p->bus = host->bus; 591 p->bus = host->bus;
592 p->bus->max_bus_speed = max_bus_speed;
593 p->bus->cur_bus_speed = cur_bus_speed;
538 594
539 pci_bus_assign_resources(p->bus); 595 pci_bus_assign_resources(p->bus);
540 pci_bus_add_devices(p->bus); 596 pci_bus_add_devices(p->bus);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 84936383e269..415dcc69a502 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -64,22 +64,39 @@
64 * major version. 64 * major version.
65 */ 65 */
66 66
67#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major))) 67#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
68#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) 68#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
69#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) 69#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
70 70
71enum { 71enum pci_protocol_version_t {
72 PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), 72 PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
73 PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 73 PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
74}; 74};
75 75
76#define CPU_AFFINITY_ALL -1ULL 76#define CPU_AFFINITY_ALL -1ULL
77
78/*
79 * Supported protocol versions in the order of probing - highest go
80 * first.
81 */
82static enum pci_protocol_version_t pci_protocol_versions[] = {
83 PCI_PROTOCOL_VERSION_1_2,
84 PCI_PROTOCOL_VERSION_1_1,
85};
86
87/*
88 * Protocol version negotiated by hv_pci_protocol_negotiation().
89 */
90static enum pci_protocol_version_t pci_protocol_version;
91
77#define PCI_CONFIG_MMIO_LENGTH 0x2000 92#define PCI_CONFIG_MMIO_LENGTH 0x2000
78#define CFG_PAGE_OFFSET 0x1000 93#define CFG_PAGE_OFFSET 0x1000
79#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) 94#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
80 95
81#define MAX_SUPPORTED_MSI_MESSAGES 0x400 96#define MAX_SUPPORTED_MSI_MESSAGES 0x400
82 97
98#define STATUS_REVISION_MISMATCH 0xC0000059
99
83/* 100/*
84 * Message Types 101 * Message Types
85 */ 102 */
@@ -109,6 +126,9 @@ enum pci_message_type {
109 PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, 126 PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
110 PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, 127 PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
111 PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, 128 PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
129 PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
130 PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
131 PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
112 PCI_MESSAGE_MAXIMUM 132 PCI_MESSAGE_MAXIMUM
113}; 133};
114 134
@@ -179,6 +199,30 @@ struct hv_msi_desc {
179} __packed; 199} __packed;
180 200
181/** 201/**
202 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
203 * @vector: IDT entry
204 * @delivery_mode: As defined in Intel's Programmer's
205 * Reference Manual, Volume 3, Chapter 8.
206 * @vector_count: Number of contiguous entries in the
207 * Interrupt Descriptor Table that are
208 * occupied by this Message-Signaled
209 * Interrupt. For "MSI", as first defined
210 * in PCI 2.2, this can be between 1 and
211 * 32. For "MSI-X," as first defined in PCI
212 * 3.0, this must be 1, as each MSI-X table
213 * entry would have its own descriptor.
214 * @processor_count: number of bits enabled in array.
215 * @processor_array: All the target virtual processors.
216 */
217struct hv_msi_desc2 {
218 u8 vector;
219 u8 delivery_mode;
220 u16 vector_count;
221 u16 processor_count;
222 u16 processor_array[32];
223} __packed;
224
225/**
182 * struct tran_int_desc 226 * struct tran_int_desc
183 * @reserved: unused, padding 227 * @reserved: unused, padding
184 * @vector_count: same as in hv_msi_desc 228 * @vector_count: same as in hv_msi_desc
@@ -245,7 +289,7 @@ struct pci_packet {
245 289
246struct pci_version_request { 290struct pci_version_request {
247 struct pci_message message_type; 291 struct pci_message message_type;
248 enum pci_message_type protocol_version; 292 u32 protocol_version;
249} __packed; 293} __packed;
250 294
251/* 295/*
@@ -294,6 +338,14 @@ struct pci_resources_assigned {
294 u32 reserved[4]; 338 u32 reserved[4];
295} __packed; 339} __packed;
296 340
341struct pci_resources_assigned2 {
342 struct pci_message message_type;
343 union win_slot_encoding wslot;
344 u8 memory_range[0x14][6]; /* not used here */
345 u32 msi_descriptor_count;
346 u8 reserved[70];
347} __packed;
348
297struct pci_create_interrupt { 349struct pci_create_interrupt {
298 struct pci_message message_type; 350 struct pci_message message_type;
299 union win_slot_encoding wslot; 351 union win_slot_encoding wslot;
@@ -306,6 +358,12 @@ struct pci_create_int_response {
306 struct tran_int_desc int_desc; 358 struct tran_int_desc int_desc;
307} __packed; 359} __packed;
308 360
361struct pci_create_interrupt2 {
362 struct pci_message message_type;
363 union win_slot_encoding wslot;
364 struct hv_msi_desc2 int_desc;
365} __packed;
366
309struct pci_delete_interrupt { 367struct pci_delete_interrupt {
310 struct pci_message message_type; 368 struct pci_message message_type;
311 union win_slot_encoding wslot; 369 union win_slot_encoding wslot;
@@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE);
331#define HV_PARTITION_ID_SELF ((u64)-1) 389#define HV_PARTITION_ID_SELF ((u64)-1)
332#define HVCALL_RETARGET_INTERRUPT 0x7e 390#define HVCALL_RETARGET_INTERRUPT 0x7e
333 391
334struct retarget_msi_interrupt { 392struct hv_interrupt_entry {
335 u64 partition_id; /* use "self" */
336 u64 device_id;
337 u32 source; /* 1 for MSI(-X) */ 393 u32 source; /* 1 for MSI(-X) */
338 u32 reserved1; 394 u32 reserved1;
339 u32 address; 395 u32 address;
340 u32 data; 396 u32 data;
341 u64 reserved2; 397};
398
399#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */
400
401struct hv_vp_set {
402 u64 format; /* 0 (HvGenericSetSparse4k) */
403 u64 valid_banks;
404 u64 masks[HV_VP_SET_BANK_COUNT_MAX];
405};
406
407/*
408 * flags for hv_device_interrupt_target.flags
409 */
410#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
411#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
412
413struct hv_device_interrupt_target {
342 u32 vector; 414 u32 vector;
343 u32 flags; 415 u32 flags;
344 u64 vp_mask; 416 union {
417 u64 vp_mask;
418 struct hv_vp_set vp_set;
419 };
420};
421
422struct retarget_msi_interrupt {
423 u64 partition_id; /* use "self" */
424 u64 device_id;
425 struct hv_interrupt_entry int_entry;
426 u64 reserved2;
427 struct hv_device_interrupt_target int_target;
345} __packed; 428} __packed;
346 429
347/* 430/*
@@ -382,7 +465,10 @@ struct hv_pcibus_device {
382 struct msi_domain_info msi_info; 465 struct msi_domain_info msi_info;
383 struct msi_controller msi_chip; 466 struct msi_controller msi_chip;
384 struct irq_domain *irq_domain; 467 struct irq_domain *irq_domain;
468
469 /* hypercall arg, must not cross page boundary */
385 struct retarget_msi_interrupt retarget_msi_interrupt_params; 470 struct retarget_msi_interrupt retarget_msi_interrupt_params;
471
386 spinlock_t retarget_msi_interrupt_lock; 472 spinlock_t retarget_msi_interrupt_lock;
387}; 473};
388 474
@@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
476static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); 562static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
477static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); 563static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
478 564
565
566/*
567 * Temporary CPU to vCPU mapping to address transitioning
568 * vmbus_cpu_number_to_vp_number() being migrated to
569 * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
570 * has been picked up in the main line, remove this code here and use
571 * the official code.
572 */
573static struct hv_tmpcpumap
574{
575 bool initialized;
576 u32 vp_index[NR_CPUS];
577} hv_tmpcpumap;
578
579static void hv_tmpcpumap_init_cpu(void *_unused)
580{
581 int cpu = smp_processor_id();
582 u64 vp_index;
583
584 hv_get_vp_index(vp_index);
585
586 hv_tmpcpumap.vp_index[cpu] = vp_index;
587}
588
589static void hv_tmpcpumap_init(void)
590{
591 if (hv_tmpcpumap.initialized)
592 return;
593
594 memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
595 on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
596 hv_tmpcpumap.initialized = true;
597}
598
599/**
600 * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
601 *
602 * Remove once vmbus_cpu_number_to_vp_number() has been converted to
603 * hv_cpu_number_to_vp_number() and replace callers appropriately.
604 */
605static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
606{
607 return hv_tmpcpumap.vp_index[cpu];
608}
609
610
479/** 611/**
480 * devfn_to_wslot() - Convert from Linux PCI slot to Windows 612 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
481 * @devfn: The Linux representation of PCI slot 613 * @devfn: The Linux representation of PCI slot
@@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data)
786 struct cpumask *dest; 918 struct cpumask *dest;
787 struct pci_bus *pbus; 919 struct pci_bus *pbus;
788 struct pci_dev *pdev; 920 struct pci_dev *pdev;
789 int cpu;
790 unsigned long flags; 921 unsigned long flags;
922 u32 var_size = 0;
923 int cpu_vmbus;
924 int cpu;
925 u64 res;
791 926
792 dest = irq_data_get_affinity_mask(data); 927 dest = irq_data_get_affinity_mask(data);
793 pdev = msi_desc_to_pci_dev(msi_desc); 928 pdev = msi_desc_to_pci_dev(msi_desc);
@@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data)
799 params = &hbus->retarget_msi_interrupt_params; 934 params = &hbus->retarget_msi_interrupt_params;
800 memset(params, 0, sizeof(*params)); 935 memset(params, 0, sizeof(*params));
801 params->partition_id = HV_PARTITION_ID_SELF; 936 params->partition_id = HV_PARTITION_ID_SELF;
802 params->source = 1; /* MSI(-X) */ 937 params->int_entry.source = 1; /* MSI(-X) */
803 params->address = msi_desc->msg.address_lo; 938 params->int_entry.address = msi_desc->msg.address_lo;
804 params->data = msi_desc->msg.data; 939 params->int_entry.data = msi_desc->msg.data;
805 params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | 940 params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
806 (hbus->hdev->dev_instance.b[4] << 16) | 941 (hbus->hdev->dev_instance.b[4] << 16) |
807 (hbus->hdev->dev_instance.b[7] << 8) | 942 (hbus->hdev->dev_instance.b[7] << 8) |
808 (hbus->hdev->dev_instance.b[6] & 0xf8) | 943 (hbus->hdev->dev_instance.b[6] & 0xf8) |
809 PCI_FUNC(pdev->devfn); 944 PCI_FUNC(pdev->devfn);
810 params->vector = cfg->vector; 945 params->int_target.vector = cfg->vector;
946
947 /*
948 * Honoring apic->irq_delivery_mode set to dest_Fixed by
949 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
950 * spurious interrupt storm. Not doing so does not seem to have a
951 * negative effect (yet?).
952 */
953
954 if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
955 /*
956 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
957 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
958 * with >64 VP support.
959 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
960 * is not sufficient for this hypercall.
961 */
962 params->int_target.flags |=
963 HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
964 params->int_target.vp_set.valid_banks =
965 (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
966
967 /*
968 * var-sized hypercall, var-size starts after vp_mask (thus
969 * vp_set.format does not count, but vp_set.valid_banks does).
970 */
971 var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
811 972
812 for_each_cpu_and(cpu, dest, cpu_online_mask) 973 for_each_cpu_and(cpu, dest, cpu_online_mask) {
813 params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu)); 974 cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
814 975
815 hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL); 976 if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
977 dev_err(&hbus->hdev->device,
978 "too high CPU %d", cpu_vmbus);
979 res = 1;
980 goto exit_unlock;
981 }
816 982
983 params->int_target.vp_set.masks[cpu_vmbus / 64] |=
984 (1ULL << (cpu_vmbus & 63));
985 }
986 } else {
987 for_each_cpu_and(cpu, dest, cpu_online_mask) {
988 params->int_target.vp_mask |=
989 (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
990 }
991 }
992
993 res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
994 params, NULL);
995
996exit_unlock:
817 spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); 997 spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
818 998
999 if (res) {
1000 dev_err(&hbus->hdev->device,
1001 "%s() failed: %#llx", __func__, res);
1002 return;
1003 }
1004
819 pci_msi_unmask_irq(data); 1005 pci_msi_unmask_irq(data);
820} 1006}
821 1007
@@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
836 complete(&comp_pkt->comp_pkt.host_event); 1022 complete(&comp_pkt->comp_pkt.host_event);
837} 1023}
838 1024
1025static u32 hv_compose_msi_req_v1(
1026 struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
1027 u32 slot, u8 vector)
1028{
1029 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1030 int_pkt->wslot.slot = slot;
1031 int_pkt->int_desc.vector = vector;
1032 int_pkt->int_desc.vector_count = 1;
1033 int_pkt->int_desc.delivery_mode =
1034 (apic->irq_delivery_mode == dest_LowestPrio) ?
1035 dest_LowestPrio : dest_Fixed;
1036
1037 /*
1038 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1039 * hv_irq_unmask().
1040 */
1041 int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1042
1043 return sizeof(*int_pkt);
1044}
1045
1046static u32 hv_compose_msi_req_v2(
1047 struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
1048 u32 slot, u8 vector)
1049{
1050 int cpu;
1051
1052 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1053 int_pkt->wslot.slot = slot;
1054 int_pkt->int_desc.vector = vector;
1055 int_pkt->int_desc.vector_count = 1;
1056 int_pkt->int_desc.delivery_mode =
1057 (apic->irq_delivery_mode == dest_LowestPrio) ?
1058 dest_LowestPrio : dest_Fixed;
1059
1060 /*
1061 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1062 * by subsequent retarget in hv_irq_unmask().
1063 */
1064 cpu = cpumask_first_and(affinity, cpu_online_mask);
1065 int_pkt->int_desc.processor_array[0] =
1066 hv_tmp_cpu_nr_to_vp_nr(cpu);
1067 int_pkt->int_desc.processor_count = 1;
1068
1069 return sizeof(*int_pkt);
1070}
1071
839/** 1072/**
840 * hv_compose_msi_msg() - Supplies a valid MSI address/data 1073 * hv_compose_msi_msg() - Supplies a valid MSI address/data
841 * @data: Everything about this MSI 1074 * @data: Everything about this MSI
@@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
854 struct hv_pci_dev *hpdev; 1087 struct hv_pci_dev *hpdev;
855 struct pci_bus *pbus; 1088 struct pci_bus *pbus;
856 struct pci_dev *pdev; 1089 struct pci_dev *pdev;
857 struct pci_create_interrupt *int_pkt;
858 struct compose_comp_ctxt comp; 1090 struct compose_comp_ctxt comp;
859 struct tran_int_desc *int_desc; 1091 struct tran_int_desc *int_desc;
860 struct cpumask *affinity;
861 struct { 1092 struct {
862 struct pci_packet pkt; 1093 struct pci_packet pci_pkt;
863 u8 buffer[sizeof(struct pci_create_interrupt)]; 1094 union {
864 } ctxt; 1095 struct pci_create_interrupt v1;
865 int cpu; 1096 struct pci_create_interrupt2 v2;
1097 } int_pkts;
1098 } __packed ctxt;
1099
1100 u32 size;
866 int ret; 1101 int ret;
867 1102
868 pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); 1103 pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
@@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
885 1120
886 memset(&ctxt, 0, sizeof(ctxt)); 1121 memset(&ctxt, 0, sizeof(ctxt));
887 init_completion(&comp.comp_pkt.host_event); 1122 init_completion(&comp.comp_pkt.host_event);
888 ctxt.pkt.completion_func = hv_pci_compose_compl; 1123 ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
889 ctxt.pkt.compl_ctxt = &comp; 1124 ctxt.pci_pkt.compl_ctxt = &comp;
890 int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message; 1125
891 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; 1126 switch (pci_protocol_version) {
892 int_pkt->wslot.slot = hpdev->desc.win_slot.slot; 1127 case PCI_PROTOCOL_VERSION_1_1:
893 int_pkt->int_desc.vector = cfg->vector; 1128 size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
894 int_pkt->int_desc.vector_count = 1; 1129 irq_data_get_affinity_mask(data),
895 int_pkt->int_desc.delivery_mode = 1130 hpdev->desc.win_slot.slot,
896 (apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0; 1131 cfg->vector);
1132 break;
897 1133
898 /* 1134 case PCI_PROTOCOL_VERSION_1_2:
899 * This bit doesn't have to work on machines with more than 64 1135 size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
900 * processors because Hyper-V only supports 64 in a guest. 1136 irq_data_get_affinity_mask(data),
901 */ 1137 hpdev->desc.win_slot.slot,
902 affinity = irq_data_get_affinity_mask(data); 1138 cfg->vector);
903 if (cpumask_weight(affinity) >= 32) { 1139 break;
904 int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; 1140
905 } else { 1141 default:
906 for_each_cpu_and(cpu, affinity, cpu_online_mask) { 1142 /* As we only negotiate protocol versions known to this driver,
907 int_pkt->int_desc.cpu_mask |= 1143 * this path should never hit. However, this is it not a hot
908 (1ULL << vmbus_cpu_number_to_vp_number(cpu)); 1144 * path so we print a message to aid future updates.
909 } 1145 */
1146 dev_err(&hbus->hdev->device,
1147 "Unexpected vPCI protocol, update driver.");
1148 goto free_int_desc;
910 } 1149 }
911 1150
912 ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, 1151 ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
913 sizeof(*int_pkt), (unsigned long)&ctxt.pkt, 1152 size, (unsigned long)&ctxt.pci_pkt,
914 VM_PKT_DATA_INBAND, 1153 VM_PKT_DATA_INBAND,
915 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1154 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
916 if (ret) 1155 if (ret) {
1156 dev_err(&hbus->hdev->device,
1157 "Sending request for interrupt failed: 0x%x",
1158 comp.comp_pkt.completion_status);
917 goto free_int_desc; 1159 goto free_int_desc;
1160 }
918 1161
919 wait_for_completion(&comp.comp_pkt.host_event); 1162 wait_for_completion(&comp.comp_pkt.host_event);
920 1163
@@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work)
1513 put_pcichild(hpdev, hv_pcidev_ref_initial); 1756 put_pcichild(hpdev, hv_pcidev_ref_initial);
1514 } 1757 }
1515 1758
1516 switch(hbus->state) { 1759 switch (hbus->state) {
1517 case hv_pcibus_installed: 1760 case hv_pcibus_installed:
1518 /* 1761 /*
1519 * Tell the core to rescan bus 1762 * Tell the core to rescan bus
1520 * because there may have been changes. 1763 * because there may have been changes.
1521 */ 1764 */
1522 pci_lock_rescan_remove(); 1765 pci_lock_rescan_remove();
1523 pci_scan_child_bus(hbus->pci_bus); 1766 pci_scan_child_bus(hbus->pci_bus);
1524 pci_unlock_rescan_remove(); 1767 pci_unlock_rescan_remove();
@@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
1800 struct hv_pci_compl comp_pkt; 2043 struct hv_pci_compl comp_pkt;
1801 struct pci_packet *pkt; 2044 struct pci_packet *pkt;
1802 int ret; 2045 int ret;
2046 int i;
1803 2047
1804 /* 2048 /*
1805 * Initiate the handshake with the host and negotiate 2049 * Initiate the handshake with the host and negotiate
@@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
1816 pkt->compl_ctxt = &comp_pkt; 2060 pkt->compl_ctxt = &comp_pkt;
1817 version_req = (struct pci_version_request *)&pkt->message; 2061 version_req = (struct pci_version_request *)&pkt->message;
1818 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; 2062 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
1819 version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
1820 2063
1821 ret = vmbus_sendpacket(hdev->channel, version_req, 2064 for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
1822 sizeof(struct pci_version_request), 2065 version_req->protocol_version = pci_protocol_versions[i];
1823 (unsigned long)pkt, VM_PKT_DATA_INBAND, 2066 ret = vmbus_sendpacket(hdev->channel, version_req,
1824 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 2067 sizeof(struct pci_version_request),
1825 if (ret) 2068 (unsigned long)pkt, VM_PKT_DATA_INBAND,
1826 goto exit; 2069 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2070 if (ret) {
2071 dev_err(&hdev->device,
2072				"PCI Pass-through VSP failed sending version request: %#x",
2073 ret);
2074 goto exit;
2075 }
1827 2076
1828 wait_for_completion(&comp_pkt.host_event); 2077 wait_for_completion(&comp_pkt.host_event);
1829 2078
1830 if (comp_pkt.completion_status < 0) { 2079 if (comp_pkt.completion_status >= 0) {
1831 dev_err(&hdev->device, 2080 pci_protocol_version = pci_protocol_versions[i];
1832 "PCI Pass-through VSP failed version request %x\n", 2081 dev_info(&hdev->device,
1833 comp_pkt.completion_status); 2082 "PCI VMBus probing: Using version %#x\n",
1834 ret = -EPROTO; 2083 pci_protocol_version);
1835 goto exit; 2084 goto exit;
2085 }
2086
2087 if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
2088 dev_err(&hdev->device,
2089 "PCI Pass-through VSP failed version request: %#x",
2090 comp_pkt.completion_status);
2091 ret = -EPROTO;
2092 goto exit;
2093 }
2094
2095 reinit_completion(&comp_pkt.host_event);
1836 } 2096 }
1837 2097
1838 ret = 0; 2098 dev_err(&hdev->device,
2099 "PCI pass-through VSP failed to find supported version");
2100 ret = -EPROTO;
1839 2101
1840exit: 2102exit:
1841 kfree(pkt); 2103 kfree(pkt);
@@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
2094{ 2356{
2095 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); 2357 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2096 struct pci_resources_assigned *res_assigned; 2358 struct pci_resources_assigned *res_assigned;
2359 struct pci_resources_assigned2 *res_assigned2;
2097 struct hv_pci_compl comp_pkt; 2360 struct hv_pci_compl comp_pkt;
2098 struct hv_pci_dev *hpdev; 2361 struct hv_pci_dev *hpdev;
2099 struct pci_packet *pkt; 2362 struct pci_packet *pkt;
2363 size_t size_res;
2100 u32 wslot; 2364 u32 wslot;
2101 int ret; 2365 int ret;
2102 2366
2103 pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL); 2367 size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
2368 ? sizeof(*res_assigned) : sizeof(*res_assigned2);
2369
2370 pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
2104 if (!pkt) 2371 if (!pkt)
2105 return -ENOMEM; 2372 return -ENOMEM;
2106 2373
@@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
2111 if (!hpdev) 2378 if (!hpdev)
2112 continue; 2379 continue;
2113 2380
2114 memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned)); 2381 memset(pkt, 0, sizeof(*pkt) + size_res);
2115 init_completion(&comp_pkt.host_event); 2382 init_completion(&comp_pkt.host_event);
2116 pkt->completion_func = hv_pci_generic_compl; 2383 pkt->completion_func = hv_pci_generic_compl;
2117 pkt->compl_ctxt = &comp_pkt; 2384 pkt->compl_ctxt = &comp_pkt;
2118 res_assigned = (struct pci_resources_assigned *)&pkt->message;
2119 res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
2120 res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
2121 2385
2386 if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
2387 res_assigned =
2388 (struct pci_resources_assigned *)&pkt->message;
2389 res_assigned->message_type.type =
2390 PCI_RESOURCES_ASSIGNED;
2391 res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
2392 } else {
2393 res_assigned2 =
2394 (struct pci_resources_assigned2 *)&pkt->message;
2395 res_assigned2->message_type.type =
2396 PCI_RESOURCES_ASSIGNED2;
2397 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
2398 }
2122 put_pcichild(hpdev, hv_pcidev_ref_by_slot); 2399 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
2123 2400
2124 ret = vmbus_sendpacket( 2401 ret = vmbus_sendpacket(hdev->channel, &pkt->message,
2125 hdev->channel, &pkt->message, 2402 size_res, (unsigned long)pkt,
2126 sizeof(*res_assigned), 2403 VM_PKT_DATA_INBAND,
2127 (unsigned long)pkt, 2404 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2128 VM_PKT_DATA_INBAND,
2129 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2130 if (ret) 2405 if (ret)
2131 break; 2406 break;
2132 2407
@@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev,
2204 struct hv_pcibus_device *hbus; 2479 struct hv_pcibus_device *hbus;
2205 int ret; 2480 int ret;
2206 2481
2207 hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); 2482 /*
2483 * hv_pcibus_device contains the hypercall arguments for retargeting in
2484 * hv_irq_unmask(). Those must not cross a page boundary.
2485 */
2486 BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
2487
2488 hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
2208 if (!hbus) 2489 if (!hbus)
2209 return -ENOMEM; 2490 return -ENOMEM;
2210 hbus->state = hv_pcibus_init; 2491 hbus->state = hv_pcibus_init;
2211 2492
2493 hv_tmpcpumap_init();
2494
2212 /* 2495 /*
2213 * The PCI bus "domain" is what is called "segment" in ACPI and 2496 * The PCI bus "domain" is what is called "segment" in ACPI and
2214 * other specs. Pull it from the instance ID, to get something 2497 * other specs. Pull it from the instance ID, to get something
@@ -2308,7 +2591,7 @@ free_config:
2308close: 2591close:
2309 vmbus_close(hdev->channel); 2592 vmbus_close(hdev->channel);
2310free_bus: 2593free_bus:
2311 kfree(hbus); 2594 free_page((unsigned long)hbus);
2312 return ret; 2595 return ret;
2313} 2596}
2314 2597
@@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev)
2386 irq_domain_free_fwnode(hbus->sysdata.fwnode); 2669 irq_domain_free_fwnode(hbus->sysdata.fwnode);
2387 put_hvpcibus(hbus); 2670 put_hvpcibus(hbus);
2388 wait_for_completion(&hbus->remove_event); 2671 wait_for_completion(&hbus->remove_event);
2389 kfree(hbus); 2672 free_page((unsigned long)hbus);
2390 return 0; 2673 return 0;
2391} 2674}
2392 2675
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 85348590848b..6f879685fedd 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
429 return 0; 429 return 0;
430} 430}
431 431
432static struct of_device_id rcar_pci_of_match[] = { 432static const struct of_device_id rcar_pci_of_match[] = {
433 { .compatible = "renesas,pci-r8a7790", }, 433 { .compatible = "renesas,pci-r8a7790", },
434 { .compatible = "renesas,pci-r8a7791", }, 434 { .compatible = "renesas,pci-r8a7791", },
435 { .compatible = "renesas,pci-r8a7794", }, 435 { .compatible = "renesas,pci-r8a7794", },
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0dadb81eca70..b3722b7709df 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,8 +233,8 @@ struct tegra_msi {
233 struct msi_controller chip; 233 struct msi_controller chip;
234 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 234 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
235 struct irq_domain *domain; 235 struct irq_domain *domain;
236 unsigned long pages;
237 struct mutex lock; 236 struct mutex lock;
237 u64 phys;
238 int irq; 238 int irq;
239}; 239};
240 240
@@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip,
1448 1448
1449 irq_set_msi_desc(irq, desc); 1449 irq_set_msi_desc(irq, desc);
1450 1450
1451 msg.address_lo = virt_to_phys((void *)msi->pages); 1451 msg.address_lo = lower_32_bits(msi->phys);
1452 /* 32 bit address only */ 1452 msg.address_hi = upper_32_bits(msi->phys);
1453 msg.address_hi = 0;
1454 msg.data = hwirq; 1453 msg.data = hwirq;
1455 1454
1456 pci_write_msi_msg(irq, &msg); 1455 pci_write_msi_msg(irq, &msg);
@@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1499 const struct tegra_pcie_soc *soc = pcie->soc; 1498 const struct tegra_pcie_soc *soc = pcie->soc;
1500 struct tegra_msi *msi = &pcie->msi; 1499 struct tegra_msi *msi = &pcie->msi;
1501 struct device *dev = pcie->dev; 1500 struct device *dev = pcie->dev;
1502 unsigned long base;
1503 int err; 1501 int err;
1504 u32 reg; 1502 u32 reg;
1505 1503
@@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1531 goto err; 1529 goto err;
1532 } 1530 }
1533 1531
1534 /* setup AFI/FPCI range */ 1532 /*
1535 msi->pages = __get_free_pages(GFP_KERNEL, 0); 1533 * The PCI host bridge on Tegra contains some logic that intercepts
1536 base = virt_to_phys((void *)msi->pages); 1534 * MSI writes, which means that the MSI target address doesn't have
1535 * to point to actual physical memory. Rather than allocating one 4
1536 * KiB page of system memory that's never used, we can simply pick
1537 * an arbitrary address within an area reserved for system memory
1538 * in the FPCI address map.
1539 *
1540 * However, in order to avoid confusion, we pick an address that
1541 * doesn't map to physical memory. The FPCI address map reserves a
1542 * 1012 GiB region for system memory and memory-mapped I/O. Since
1543 * none of the Tegra SoCs that contain this PCI host bridge can
1544 * address more than 16 GiB of system memory, the last 4 KiB of
1545 * these 1012 GiB is a good candidate.
1546 */
1547 msi->phys = 0xfcfffff000;
1537 1548
1538 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); 1549 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1539 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST); 1550 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1540 /* this register is in 4K increments */ 1551 /* this register is in 4K increments */
1541 afi_writel(pcie, 1, AFI_MSI_BAR_SZ); 1552 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1542 1553
@@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1585 afi_writel(pcie, 0, AFI_MSI_EN_VEC6); 1596 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1586 afi_writel(pcie, 0, AFI_MSI_EN_VEC7); 1597 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1587 1598
1588 free_pages(msi->pages, 0);
1589
1590 if (msi->irq > 0) 1599 if (msi->irq > 0)
1591 free_irq(msi->irq, pcie); 1600 free_irq(msi->irq, pcie);
1592 1601
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index f6fcec6b5578..d417acab0ecf 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -120,6 +120,7 @@ out_release_res:
120 120
121static int versatile_pci_probe(struct platform_device *pdev) 121static int versatile_pci_probe(struct platform_device *pdev)
122{ 122{
123 struct device *dev = &pdev->dev;
123 struct resource *res; 124 struct resource *res;
124 int ret, i, myslot = -1; 125 int ret, i, myslot = -1;
125 u32 val; 126 u32 val;
@@ -128,27 +129,26 @@ static int versatile_pci_probe(struct platform_device *pdev)
128 struct pci_host_bridge *bridge; 129 struct pci_host_bridge *bridge;
129 LIST_HEAD(pci_res); 130 LIST_HEAD(pci_res);
130 131
131 bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0); 132 bridge = devm_pci_alloc_host_bridge(dev, 0);
132 if (!bridge) 133 if (!bridge)
133 return -ENOMEM; 134 return -ENOMEM;
134 135
135 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 136 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
136 versatile_pci_base = devm_ioremap_resource(&pdev->dev, res); 137 versatile_pci_base = devm_ioremap_resource(dev, res);
137 if (IS_ERR(versatile_pci_base)) 138 if (IS_ERR(versatile_pci_base))
138 return PTR_ERR(versatile_pci_base); 139 return PTR_ERR(versatile_pci_base);
139 140
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 141 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
141 versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res); 142 versatile_cfg_base[0] = devm_ioremap_resource(dev, res);
142 if (IS_ERR(versatile_cfg_base[0])) 143 if (IS_ERR(versatile_cfg_base[0]))
143 return PTR_ERR(versatile_cfg_base[0]); 144 return PTR_ERR(versatile_cfg_base[0]);
144 145
145 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 146 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
146 versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev, 147 versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res);
147 res);
148 if (IS_ERR(versatile_cfg_base[1])) 148 if (IS_ERR(versatile_cfg_base[1]))
149 return PTR_ERR(versatile_cfg_base[1]); 149 return PTR_ERR(versatile_cfg_base[1]);
150 150
151 ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res); 151 ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
152 if (ret) 152 if (ret)
153 return ret; 153 return ret;
154 154
@@ -164,7 +164,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
164 } 164 }
165 } 165 }
166 if (myslot == -1) { 166 if (myslot == -1) {
167 dev_err(&pdev->dev, "Cannot find PCI core!\n"); 167 dev_err(dev, "Cannot find PCI core!\n");
168 return -EIO; 168 return -EIO;
169 } 169 }
170 /* 170 /*
@@ -172,7 +172,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
172 */ 172 */
173 pci_slot_ignore |= (1 << myslot); 173 pci_slot_ignore |= (1 << myslot);
174 174
175 dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot); 175 dev_info(dev, "PCI core found (slot %d)\n", myslot);
176 176
177 writel(myslot, PCI_SELFID); 177 writel(myslot, PCI_SELFID);
178 local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); 178 local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
@@ -205,7 +205,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
205 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); 205 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
206 206
207 list_splice_init(&pci_res, &bridge->windows); 207 list_splice_init(&pci_res, &bridge->windows);
208 bridge->dev.parent = &pdev->dev; 208 bridge->dev.parent = dev;
209 bridge->sysdata = NULL; 209 bridge->sysdata = NULL;
210 bridge->busnr = 0; 210 bridge->busnr = 0;
211 bridge->ops = &pci_versatile_ops; 211 bridge->ops = &pci_versatile_ops;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
new file mode 100644
index 000000000000..5a9d8589ea0b
--- /dev/null
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -0,0 +1,554 @@
1/*
2 * MediaTek PCIe host controller driver.
3 *
4 * Copyright (c) 2017 MediaTek Inc.
5 * Author: Ryder Lee <ryder.lee@mediatek.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk.h>
18#include <linux/delay.h>
19#include <linux/kernel.h>
20#include <linux/of_address.h>
21#include <linux/of_pci.h>
22#include <linux/of_platform.h>
23#include <linux/pci.h>
24#include <linux/phy/phy.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/reset.h>
28
/* PCIe shared registers (host controller subsystem block) */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20	/* config-space address latch */
#define PCIE_CFG_DATA		0x24	/* config-space data window */

/* PCIe per port registers */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
#define PCIE_CLASS_CODE		(0x60400 << 8)
/* Pack a config-space register offset into the CFG_ADDR layout:
 * bits 7:2 hold offset[7:2], bits 27:24 hold offset[11:8] (extended space).
 */
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))

/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM		0x70c
#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
/*
 * NOTE(review): '&' binds looser than '<<', so this expands to
 * ((x) & 0xff00); with the caller's x = 0x50 the result is 0, leaving the
 * FTS field cleared rather than set to 0x50. Confirm against the hardware
 * manual whether (((x) << 8) & GENMASK(15, 8)) was intended.
 */
#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)

#define PCIE_FC_CREDIT		0x73c
#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)
/**
 * struct mtk_pcie_port - per-root-port state
 * @base: IO mapped register base of this port's control block
 * @list: entry in the parent host's &mtk_pcie.ports list
 * @pcie: back-pointer to the owning PCIe host info
 * @reset: port reset control (optional; obtained by name "pcie-rstN")
 * @sys_ck: port bus clock ("sys_ckN")
 * @phy: PHY control block (optional; some platforms use default PHY setting)
 * @lane: lane count from the "num-lanes" DT property
 * @index: port index, derived from the DT child node's devfn slot
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct phy *phy;
	u32 lane;
	u32 index;
};
87
/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base of the shared register block
 * @free_ck: free-run reference clock; left NULL when the platform does not
 *           provide one (the clk API treats a NULL clock as a no-op)
 * @io: CPU-side I/O window resource (IORESOURCE_MEM view of the I/O range)
 * @pio: PCI I/O resource registered with the PCI core
 * @mem: non-prefetchable memory resource
 * @busn: bus range (defaults to 0-0xff if the DT property is absent)
 * @offset: CPU-to-PCI address offsets for the I/O and memory windows
 * @ports: list of active &mtk_pcie_port entries
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
};
115
116static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port)
117{
118 return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP);
119}
120
121static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
122{
123 struct device *dev = pcie->dev;
124
125 clk_disable_unprepare(pcie->free_ck);
126
127 if (dev->pm_domain) {
128 pm_runtime_put_sync(dev);
129 pm_runtime_disable(dev);
130 }
131}
132
133static void mtk_pcie_port_free(struct mtk_pcie_port *port)
134{
135 struct mtk_pcie *pcie = port->pcie;
136 struct device *dev = pcie->dev;
137
138 devm_iounmap(dev, port->base);
139 list_del(&port->list);
140 devm_kfree(dev, port);
141}
142
143static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
144{
145 struct mtk_pcie_port *port, *tmp;
146
147 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
148 phy_power_off(port->phy);
149 clk_disable_unprepare(port->sys_ck);
150 mtk_pcie_port_free(port);
151 }
152
153 mtk_pcie_subsys_powerdown(pcie);
154}
155
156static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
157 unsigned int devfn, int where)
158{
159 struct pci_host_bridge *host = pci_find_host_bridge(bus);
160 struct mtk_pcie *pcie = pci_host_bridge_priv(host);
161
162 writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
163 bus->number), pcie->base + PCIE_CFG_ADDR);
164
165 return pcie->base + PCIE_CFG_DATA + (where & 3);
166}
167
/* Config accesses go through the indirect CFG_ADDR/CFG_DATA window. */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
173
/*
 * Program one root port's configuration after link-up: enable its interrupt,
 * open BAR0 to the whole DDR range, set class/revision, and tune FC credits
 * and FTS count via indirect config-space read-modify-writes.
 */
static void mtk_pcie_configure_rc(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->index << 3);
	u32 slot = PCI_SLOT(port->index << 3);
	u32 val;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->index);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	/* re-latch the address before the data write: the read above may
	 * have advanced/clobbered the latch (presumed hardware behavior —
	 * the original code deliberately writes CFG_ADDR twice).
	 */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);
}
213
214static void mtk_pcie_assert_ports(struct mtk_pcie_port *port)
215{
216 struct mtk_pcie *pcie = port->pcie;
217 u32 val;
218
219 /* assert port PERST_N */
220 val = readl(pcie->base + PCIE_SYS_CFG);
221 val |= PCIE_PORT_PERST(port->index);
222 writel(val, pcie->base + PCIE_SYS_CFG);
223
224 /* de-assert port PERST_N */
225 val = readl(pcie->base + PCIE_SYS_CFG);
226 val &= ~PCIE_PORT_PERST(port->index);
227 writel(val, pcie->base + PCIE_SYS_CFG);
228
229 /* PCIe v2.0 need at least 100ms delay to train from Gen1 to Gen2 */
230 msleep(100);
231}
232
/*
 * Bring one port up: clock, reset pulse, PHY power, then PERST#. On link-up
 * the root port is configured; on any failure (or link-down) the port frees
 * itself and drops out of the host's port list, so callers must iterate
 * with the _safe list variant.
 */
static void mtk_pcie_enable_ports(struct mtk_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable port%d clock\n", port->index);
		goto err_sys_clk;
	}

	/* pulse the port reset before powering the PHY */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->index);
		goto err_phy_on;
	}

	mtk_pcie_assert_ports(port);

	/* if link up, then setup root port configuration space */
	if (mtk_pcie_link_up(port)) {
		mtk_pcie_configure_rc(port);
		return;
	}

	dev_info(dev, "Port%d link down\n", port->index);

	/* link never came up: unwind in reverse acquisition order */
	phy_power_off(port->phy);
err_phy_on:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
269
/*
 * Parse one DT child node describing a root port: lane count, per-port
 * register window (MEM resource index + 1, since index 0 is the shared
 * block), named clock/reset/PHY handles. On success the port is appended
 * to pcie->ports. All allocations are devm-managed.
 */
static int mtk_pcie_parse_ports(struct mtk_pcie *pcie,
				struct device_node *node,
				int index)
{
	struct mtk_pcie_port *port;
	struct resource *regs;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	err = of_property_read_u32(node, "num-lanes", &port->lane);
	if (err) {
		dev_err(dev, "missing num-lanes property\n");
		return err;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
	port->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(port->base)) {
		dev_err(dev, "failed to map port%d base\n", index);
		return PTR_ERR(port->base);
	}

	snprintf(name, sizeof(name), "sys_ck%d", index);
	port->sys_ck = devm_clk_get(dev, name);
	if (IS_ERR(port->sys_ck)) {
		dev_err(dev, "failed to get port%d clock\n", index);
		return PTR_ERR(port->sys_ck);
	}

	/* NOTE(review): only -EPROBE_DEFER is filtered here; other error
	 * pointers from devm_reset_control_get_optional() would be stored
	 * in port->reset and later passed to reset_control_assert() —
	 * confirm the reset API of this kernel tolerates that.
	 */
	snprintf(name, sizeof(name), "pcie-rst%d", index);
	port->reset = devm_reset_control_get_optional(dev, name);
	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
		return PTR_ERR(port->reset);

	/* some platforms may use default PHY setting */
	snprintf(name, sizeof(name), "pcie-phy%d", index);
	port->phy = devm_phy_optional_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->index = index;
	port->pcie = pcie;

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}
324
/*
 * Power up the shared PCIe subsystem: map the shared register block,
 * optionally grab the free-running reference clock (treated as absent on
 * any error other than probe deferral), take a runtime-PM reference when
 * the device is in a PM domain, and enable the clock.
 *
 * Paired with mtk_pcie_subsys_powerdown().
 */
static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int err;

	/* get shared registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pcie->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(pcie->base)) {
		dev_err(dev, "failed to map shared register\n");
		return PTR_ERR(pcie->base);
	}

	pcie->free_ck = devm_clk_get(dev, "free_ck");
	if (IS_ERR(pcie->free_ck)) {
		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* clock is optional: NULL makes later clk calls no-ops */
		pcie->free_ck = NULL;
	}

	if (dev->pm_domain) {
		pm_runtime_enable(dev);
		pm_runtime_get_sync(dev);
	}

	/* enable top level clock */
	err = clk_prepare_enable(pcie->free_ck);
	if (err) {
		dev_err(dev, "failed to enable free_ck\n");
		goto err_free_ck;
	}

	return 0;

err_free_ck:
	/* undo the runtime-PM reference taken above */
	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}

	return err;
}
370
/*
 * Top-level host setup: translate the DT "ranges" into I/O / memory
 * resources, parse the bus range (falling back to 0-0xff), create a port
 * per child node, power the subsystem, then try to bring every port up.
 * Ports whose link stays down remove themselves; if none survive, the
 * subsystem is powered back down (still returning 0 — an empty but probed
 * host is not an error).
 */
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	struct mtk_pcie_port *port, *tmp;
	int err;

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, node, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pcie->offset.io = res.start - range.pci_addr;

			/* pio: the PCI-side I/O resource */
			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = node->full_name;

			/* io: the CPU-side MMIO window backing it */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			pcie->offset.mem = res.start - range.pci_addr;

			memcpy(&pcie->mem, &res, sizeof(res));
			pcie->mem.name = "non-prefetchable";
			break;
		}
	}

	/* fall back to the full 0-0xff bus range when DT omits it */
	err = of_pci_parse_bus_range(node, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
		pcie->busn.name = node->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	for_each_available_child_of_node(node, child) {
		int index;

		/* devfn of the child encodes the port index in its slot */
		err = of_pci_get_devfn(child);
		if (err < 0) {
			dev_err(dev, "failed to parse devfn: %d\n", err);
			return err;
		}

		index = PCI_SLOT(err);

		err = mtk_pcie_parse_ports(pcie, child, index);
		if (err)
			return err;
	}

	err = mtk_pcie_subsys_powerup(pcie);
	if (err)
		return err;

	/* enable each port, and then check link status */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		mtk_pcie_enable_ports(port);

	/* power down PCIe subsys if slots are all empty (link down) */
	if (list_empty(&pcie->ports))
		mtk_pcie_subsys_powerdown(pcie);

	return 0;
}
454
455static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
456{
457 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
458 struct list_head *windows = &host->windows;
459 struct device *dev = pcie->dev;
460 int err;
461
462 pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
463 pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
464 pci_add_resource(windows, &pcie->busn);
465
466 err = devm_request_pci_bus_resources(dev, windows);
467 if (err < 0)
468 return err;
469
470 pci_remap_iospace(&pcie->pio, pcie->io.start);
471
472 return 0;
473}
474
/*
 * Finalize the host bridge (ops, IRQ mapping, root bus number) and hand it
 * to the PCI core: scan, size/assign resources, apply per-bus settings,
 * and add the discovered devices.
 */
static int mtk_pcie_register_host(struct pci_host_bridge *host)
{
	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
	struct pci_bus *child;
	int err;

	host->busnr = pcie->busn.start;
	host->dev.parent = pcie->dev;
	host->ops = &mtk_pcie_ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0)
		return err;

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	/* configure e.g. MPS/MRRS for each child bus */
	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	return 0;
}
501
502static int mtk_pcie_probe(struct platform_device *pdev)
503{
504 struct device *dev = &pdev->dev;
505 struct mtk_pcie *pcie;
506 struct pci_host_bridge *host;
507 int err;
508
509 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
510 if (!host)
511 return -ENOMEM;
512
513 pcie = pci_host_bridge_priv(host);
514
515 pcie->dev = dev;
516 platform_set_drvdata(pdev, pcie);
517 INIT_LIST_HEAD(&pcie->ports);
518
519 err = mtk_pcie_setup(pcie);
520 if (err)
521 return err;
522
523 err = mtk_pcie_request_resources(pcie);
524 if (err)
525 goto put_resources;
526
527 err = mtk_pcie_register_host(host);
528 if (err)
529 goto put_resources;
530
531 return 0;
532
533put_resources:
534 if (!list_empty(&pcie->ports))
535 mtk_pcie_put_resources(pcie);
536
537 return err;
538}
539
/* DT match table; terminated by an empty sentinel entry. */
static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt7623-pcie"},
	{ .compatible = "mediatek,mt2701-pcie"},
	{},
};
545
/* Built-in only (no module unload path); unbinding via sysfs is suppressed
 * because there is no .remove callback to tear the hardware down.
 */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(mtk_pcie_driver);
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 29332ba06bc1..5acf8694fb23 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -139,6 +139,7 @@
139 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ 139 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
140 PCIE_CORE_INT_MMVC) 140 PCIE_CORE_INT_MMVC)
141 141
142#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
142#define PCIE_RC_CONFIG_BASE 0xa00000 143#define PCIE_RC_CONFIG_BASE 0xa00000
143#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) 144#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
144#define PCIE_RC_CONFIG_SCC_SHIFT 16 145#define PCIE_RC_CONFIG_SCC_SHIFT 16
@@ -146,6 +147,9 @@
146#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 147#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
147#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff 148#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
148#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 149#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
150#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
151#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
152#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
149#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) 153#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
150#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) 154#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
151#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) 155#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
@@ -175,6 +179,8 @@
175#define IB_ROOT_PORT_REG_SIZE_SHIFT 3 179#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
176#define AXI_WRAPPER_IO_WRITE 0x6 180#define AXI_WRAPPER_IO_WRITE 0x6
177#define AXI_WRAPPER_MEM_WRITE 0x2 181#define AXI_WRAPPER_MEM_WRITE 0x2
182#define AXI_WRAPPER_TYPE0_CFG 0xa
183#define AXI_WRAPPER_TYPE1_CFG 0xb
178#define AXI_WRAPPER_NOR_MSG 0xc 184#define AXI_WRAPPER_NOR_MSG 0xc
179 185
180#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 186#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
@@ -198,6 +204,7 @@
198#define RC_REGION_0_ADDR_TRANS_H 0x00000000 204#define RC_REGION_0_ADDR_TRANS_H 0x00000000
199#define RC_REGION_0_ADDR_TRANS_L 0x00000000 205#define RC_REGION_0_ADDR_TRANS_L 0x00000000
200#define RC_REGION_0_PASS_BITS (25 - 1) 206#define RC_REGION_0_PASS_BITS (25 - 1)
207#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
201#define MAX_AXI_WRAPPER_REGION_NUM 33 208#define MAX_AXI_WRAPPER_REGION_NUM 33
202 209
203struct rockchip_pcie { 210struct rockchip_pcie {
@@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
295static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, 302static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
296 int where, int size, u32 *val) 303 int where, int size, u32 *val)
297{ 304{
298 void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where; 305 void __iomem *addr;
306
307 addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
299 308
300 if (!IS_ALIGNED((uintptr_t)addr, size)) { 309 if (!IS_ALIGNED((uintptr_t)addr, size)) {
301 *val = 0; 310 *val = 0;
@@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
319 int where, int size, u32 val) 328 int where, int size, u32 val)
320{ 329{
321 u32 mask, tmp, offset; 330 u32 mask, tmp, offset;
331 void __iomem *addr;
322 332
323 offset = where & ~0x3; 333 offset = where & ~0x3;
334 addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
324 335
325 if (size == 4) { 336 if (size == 4) {
326 writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); 337 writel(val, addr);
327 return PCIBIOS_SUCCESSFUL; 338 return PCIBIOS_SUCCESSFUL;
328 } 339 }
329 340
@@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
334 * corrupt RW1C bits in adjacent registers. But the hardware 345 * corrupt RW1C bits in adjacent registers. But the hardware
335 * doesn't support smaller writes. 346 * doesn't support smaller writes.
336 */ 347 */
337 tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask; 348 tmp = readl(addr) & mask;
338 tmp |= val << ((where & 0x3) * 8); 349 tmp |= val << ((where & 0x3) * 8);
339 writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); 350 writel(tmp, addr);
340 351
341 return PCIBIOS_SUCCESSFUL; 352 return PCIBIOS_SUCCESSFUL;
342} 353}
343 354
355static void rockchip_pcie_cfg_configuration_accesses(
356 struct rockchip_pcie *rockchip, u32 type)
357{
358 u32 ob_desc_0;
359
360 /* Configuration Accesses for region 0 */
361 rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
362
363 rockchip_pcie_write(rockchip,
364 (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
365 PCIE_CORE_OB_REGION_ADDR0);
366 rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
367 PCIE_CORE_OB_REGION_ADDR1);
368 ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
369 ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
370 ob_desc_0 |= (type | (0x1 << 23));
371 rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
372 rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
373}
374
344static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, 375static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
345 struct pci_bus *bus, u32 devfn, 376 struct pci_bus *bus, u32 devfn,
346 int where, int size, u32 *val) 377 int where, int size, u32 *val)
@@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
355 return PCIBIOS_BAD_REGISTER_NUMBER; 386 return PCIBIOS_BAD_REGISTER_NUMBER;
356 } 387 }
357 388
389 if (bus->parent->number == rockchip->root_bus_nr)
390 rockchip_pcie_cfg_configuration_accesses(rockchip,
391 AXI_WRAPPER_TYPE0_CFG);
392 else
393 rockchip_pcie_cfg_configuration_accesses(rockchip,
394 AXI_WRAPPER_TYPE1_CFG);
395
358 if (size == 4) { 396 if (size == 4) {
359 *val = readl(rockchip->reg_base + busdev); 397 *val = readl(rockchip->reg_base + busdev);
360 } else if (size == 2) { 398 } else if (size == 2) {
@@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
379 if (!IS_ALIGNED(busdev, size)) 417 if (!IS_ALIGNED(busdev, size))
380 return PCIBIOS_BAD_REGISTER_NUMBER; 418 return PCIBIOS_BAD_REGISTER_NUMBER;
381 419
420 if (bus->parent->number == rockchip->root_bus_nr)
421 rockchip_pcie_cfg_configuration_accesses(rockchip,
422 AXI_WRAPPER_TYPE0_CFG);
423 else
424 rockchip_pcie_cfg_configuration_accesses(rockchip,
425 AXI_WRAPPER_TYPE1_CFG);
426
382 if (size == 4) 427 if (size == 4)
383 writel(val, rockchip->reg_base + busdev); 428 writel(val, rockchip->reg_base + busdev);
384 else if (size == 2) 429 else if (size == 2)
@@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
664 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); 709 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
665 } 710 }
666 711
667 rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); 712 status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
668 713 status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
669 rockchip_pcie_write(rockchip, 714 status |= PCIE_RC_CONFIG_DCSR_MPS_256;
670 (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), 715 rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
671 PCIE_CORE_OB_REGION_ADDR0);
672 rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
673 PCIE_CORE_OB_REGION_ADDR1);
674 rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
675 rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
676 716
677 return 0; 717 return 0;
678} 718}
@@ -1156,13 +1196,16 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
1156 return 0; 1196 return 0;
1157} 1197}
1158 1198
1159static int rockchip_cfg_atu(struct rockchip_pcie *rockchip) 1199static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
1160{ 1200{
1161 struct device *dev = rockchip->dev; 1201 struct device *dev = rockchip->dev;
1162 int offset; 1202 int offset;
1163 int err; 1203 int err;
1164 int reg_no; 1204 int reg_no;
1165 1205
1206 rockchip_pcie_cfg_configuration_accesses(rockchip,
1207 AXI_WRAPPER_TYPE0_CFG);
1208
1166 for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { 1209 for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
1167 err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, 1210 err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
1168 AXI_WRAPPER_MEM_WRITE, 1211 AXI_WRAPPER_MEM_WRITE,
@@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
1251 clk_disable_unprepare(rockchip->aclk_perf_pcie); 1294 clk_disable_unprepare(rockchip->aclk_perf_pcie);
1252 clk_disable_unprepare(rockchip->aclk_pcie); 1295 clk_disable_unprepare(rockchip->aclk_pcie);
1253 1296
1297 if (!IS_ERR(rockchip->vpcie0v9))
1298 regulator_disable(rockchip->vpcie0v9);
1299
1254 return ret; 1300 return ret;
1255} 1301}
1256 1302
@@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
1259 struct rockchip_pcie *rockchip = dev_get_drvdata(dev); 1305 struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
1260 int err; 1306 int err;
1261 1307
1262 clk_prepare_enable(rockchip->clk_pcie_pm); 1308 if (!IS_ERR(rockchip->vpcie0v9)) {
1263 clk_prepare_enable(rockchip->hclk_pcie); 1309 err = regulator_enable(rockchip->vpcie0v9);
1264 clk_prepare_enable(rockchip->aclk_perf_pcie); 1310 if (err) {
1265 clk_prepare_enable(rockchip->aclk_pcie); 1311 dev_err(dev, "fail to enable vpcie0v9 regulator\n");
1312 return err;
1313 }
1314 }
1315
1316 err = clk_prepare_enable(rockchip->clk_pcie_pm);
1317 if (err)
1318 goto err_pcie_pm;
1319
1320 err = clk_prepare_enable(rockchip->hclk_pcie);
1321 if (err)
1322 goto err_hclk_pcie;
1323
1324 err = clk_prepare_enable(rockchip->aclk_perf_pcie);
1325 if (err)
1326 goto err_aclk_perf_pcie;
1327
1328 err = clk_prepare_enable(rockchip->aclk_pcie);
1329 if (err)
1330 goto err_aclk_pcie;
1266 1331
1267 err = rockchip_pcie_init_port(rockchip); 1332 err = rockchip_pcie_init_port(rockchip);
1268 if (err) 1333 if (err)
1269 return err; 1334 goto err_pcie_resume;
1270 1335
1271 err = rockchip_cfg_atu(rockchip); 1336 err = rockchip_pcie_cfg_atu(rockchip);
1272 if (err) 1337 if (err)
1273 return err; 1338 goto err_pcie_resume;
1274 1339
1275 /* Need this to enter L1 again */ 1340 /* Need this to enter L1 again */
1276 rockchip_pcie_update_txcredit_mui(rockchip); 1341 rockchip_pcie_update_txcredit_mui(rockchip);
1277 rockchip_pcie_enable_interrupts(rockchip); 1342 rockchip_pcie_enable_interrupts(rockchip);
1278 1343
1279 return 0; 1344 return 0;
1345
1346err_pcie_resume:
1347 clk_disable_unprepare(rockchip->aclk_pcie);
1348err_aclk_pcie:
1349 clk_disable_unprepare(rockchip->aclk_perf_pcie);
1350err_aclk_perf_pcie:
1351 clk_disable_unprepare(rockchip->hclk_pcie);
1352err_hclk_pcie:
1353 clk_disable_unprepare(rockchip->clk_pcie_pm);
1354err_pcie_pm:
1355 return err;
1280} 1356}
1281 1357
1282static int rockchip_pcie_probe(struct platform_device *pdev) 1358static int rockchip_pcie_probe(struct platform_device *pdev)
@@ -1388,19 +1464,18 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
1388 } 1464 }
1389 } 1465 }
1390 1466
1391 err = rockchip_cfg_atu(rockchip); 1467 err = rockchip_pcie_cfg_atu(rockchip);
1392 if (err) 1468 if (err)
1393 goto err_free_res; 1469 goto err_free_res;
1394 1470
1395 rockchip->msg_region = devm_ioremap(rockchip->dev, 1471 rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
1396 rockchip->msg_bus_addr, SZ_1M);
1397 if (!rockchip->msg_region) { 1472 if (!rockchip->msg_region) {
1398 err = -ENOMEM; 1473 err = -ENOMEM;
1399 goto err_free_res; 1474 goto err_free_res;
1400 } 1475 }
1401 1476
1402 list_splice_init(&res, &bridge->windows); 1477 list_splice_init(&res, &bridge->windows);
1403 bridge->dev.parent = &pdev->dev; 1478 bridge->dev.parent = dev;
1404 bridge->sysdata = rockchip; 1479 bridge->sysdata = rockchip;
1405 bridge->busnr = 0; 1480 bridge->busnr = 0;
1406 bridge->ops = &rockchip_pcie_ops; 1481 bridge->ops = &rockchip_pcie_ops;
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 9d83f37b4cb7..4c8a1257ab5d 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -707,7 +707,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
707 707
708 INIT_LIST_HEAD(&vmd->irqs[i].irq_list); 708 INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
709 err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), 709 err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
710 vmd_irq, 0, "vmd", &vmd->irqs[i]); 710 vmd_irq, IRQF_NO_THREAD,
711 "vmd", &vmd->irqs[i]);
711 if (err) 712 if (err)
712 return err; 713 return err;
713 } 714 }
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d9dc7363ac77..120485d6f352 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -461,8 +461,6 @@ found:
461 else 461 else
462 iov->dev = dev; 462 iov->dev = dev;
463 463
464 mutex_init(&iov->lock);
465
466 dev->sriov = iov; 464 dev->sriov = iov;
467 dev->is_physfn = 1; 465 dev->is_physfn = 1;
468 rc = compute_max_vf_buses(dev); 466 rc = compute_max_vf_buses(dev);
@@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev)
491 if (dev != dev->sriov->dev) 489 if (dev != dev->sriov->dev)
492 pci_dev_put(dev->sriov->dev); 490 pci_dev_put(dev->sriov->dev);
493 491
494 mutex_destroy(&dev->sriov->lock);
495
496 kfree(dev->sriov); 492 kfree(dev->sriov);
497 dev->sriov = NULL; 493 dev->sriov = NULL;
498} 494}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ba44fdfda66b..9e1569107cd6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1058 1058
1059 for (;;) { 1059 for (;;) {
1060 if (affd) { 1060 if (affd) {
1061 nvec = irq_calc_affinity_vectors(nvec, affd); 1061 nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
1062 if (nvec < minvec) 1062 if (nvec < minvec)
1063 return -ENOSPC; 1063 return -ENOSPC;
1064 } 1064 }
@@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
1097 1097
1098 for (;;) { 1098 for (;;) {
1099 if (affd) { 1099 if (affd) {
1100 nvec = irq_calc_affinity_vectors(nvec, affd); 1100 nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
1101 if (nvec < minvec) 1101 if (nvec < minvec)
1102 return -ENOSPC; 1102 return -ENOSPC;
1103 } 1103 }
@@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1165 if (flags & PCI_IRQ_AFFINITY) { 1165 if (flags & PCI_IRQ_AFFINITY) {
1166 if (!affd) 1166 if (!affd)
1167 affd = &msi_default_affd; 1167 affd = &msi_default_affd;
1168
1169 if (affd->pre_vectors + affd->post_vectors > min_vecs)
1170 return -EINVAL;
1171
1172 /*
1173 * If there aren't any vectors left after applying the pre/post
1174 * vectors don't bother with assigning affinity.
1175 */
1176 if (affd->pre_vectors + affd->post_vectors == min_vecs)
1177 affd = NULL;
1178 } else { 1168 } else {
1179 if (WARN_ON(affd)) 1169 if (WARN_ON(affd))
1180 affd = NULL; 1170 affd = NULL;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 11167c65ca37..50f93a3bc4c0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -966,6 +966,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
966 return pci_legacy_resume_early(dev); 966 return pci_legacy_resume_early(dev);
967 967
968 pci_update_current_state(pci_dev, PCI_D0); 968 pci_update_current_state(pci_dev, PCI_D0);
969 pci_restore_state(pci_dev);
969 970
970 if (drv && drv->pm && drv->pm->thaw_noirq) 971 if (drv && drv->pm && drv->pm->thaw_noirq)
971 error = drv->pm->thaw_noirq(dev); 972 error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 51357377efbc..1d828a614ac0 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
43{ 43{
44 const struct dmi_device *dmi; 44 const struct dmi_device *dmi;
45 struct dmi_dev_onboard *donboard; 45 struct dmi_dev_onboard *donboard;
46 int domain_nr;
46 int bus; 47 int bus;
47 int devfn; 48 int devfn;
48 49
50 domain_nr = pci_domain_nr(pdev->bus);
49 bus = pdev->bus->number; 51 bus = pdev->bus->number;
50 devfn = pdev->devfn; 52 devfn = pdev->devfn;
51 53
@@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
53 while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, 55 while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
54 NULL, dmi)) != NULL) { 56 NULL, dmi)) != NULL) {
55 donboard = dmi->device_data; 57 donboard = dmi->device_data;
56 if (donboard && donboard->bus == bus && 58 if (donboard && donboard->segment == domain_nr &&
57 donboard->devfn == devfn) { 59 donboard->bus == bus &&
60 donboard->devfn == devfn) {
58 if (buf) { 61 if (buf) {
59 if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) 62 if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
60 return scnprintf(buf, PAGE_SIZE, 63 return scnprintf(buf, PAGE_SIZE,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 31e99613a12e..2f3780b50723 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
154} 154}
155static DEVICE_ATTR_RO(resource); 155static DEVICE_ATTR_RO(resource);
156 156
157static ssize_t max_link_speed_show(struct device *dev,
158 struct device_attribute *attr, char *buf)
159{
160 struct pci_dev *pci_dev = to_pci_dev(dev);
161 u32 linkcap;
162 int err;
163 const char *speed;
164
165 err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
166 if (err)
167 return -EINVAL;
168
169 switch (linkcap & PCI_EXP_LNKCAP_SLS) {
170 case PCI_EXP_LNKCAP_SLS_8_0GB:
171 speed = "8 GT/s";
172 break;
173 case PCI_EXP_LNKCAP_SLS_5_0GB:
174 speed = "5 GT/s";
175 break;
176 case PCI_EXP_LNKCAP_SLS_2_5GB:
177 speed = "2.5 GT/s";
178 break;
179 default:
180 speed = "Unknown speed";
181 }
182
183 return sprintf(buf, "%s\n", speed);
184}
185static DEVICE_ATTR_RO(max_link_speed);
186
187static ssize_t max_link_width_show(struct device *dev,
188 struct device_attribute *attr, char *buf)
189{
190 struct pci_dev *pci_dev = to_pci_dev(dev);
191 u32 linkcap;
192 int err;
193
194 err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
195 if (err)
196 return -EINVAL;
197
198 return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4);
199}
200static DEVICE_ATTR_RO(max_link_width);
201
202static ssize_t current_link_speed_show(struct device *dev,
203 struct device_attribute *attr, char *buf)
204{
205 struct pci_dev *pci_dev = to_pci_dev(dev);
206 u16 linkstat;
207 int err;
208 const char *speed;
209
210 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
211 if (err)
212 return -EINVAL;
213
214 switch (linkstat & PCI_EXP_LNKSTA_CLS) {
215 case PCI_EXP_LNKSTA_CLS_8_0GB:
216 speed = "8 GT/s";
217 break;
218 case PCI_EXP_LNKSTA_CLS_5_0GB:
219 speed = "5 GT/s";
220 break;
221 case PCI_EXP_LNKSTA_CLS_2_5GB:
222 speed = "2.5 GT/s";
223 break;
224 default:
225 speed = "Unknown speed";
226 }
227
228 return sprintf(buf, "%s\n", speed);
229}
230static DEVICE_ATTR_RO(current_link_speed);
231
232static ssize_t current_link_width_show(struct device *dev,
233 struct device_attribute *attr, char *buf)
234{
235 struct pci_dev *pci_dev = to_pci_dev(dev);
236 u16 linkstat;
237 int err;
238
239 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
240 if (err)
241 return -EINVAL;
242
243 return sprintf(buf, "%u\n",
244 (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
245}
246static DEVICE_ATTR_RO(current_link_width);
247
248static ssize_t secondary_bus_number_show(struct device *dev,
249 struct device_attribute *attr,
250 char *buf)
251{
252 struct pci_dev *pci_dev = to_pci_dev(dev);
253 u8 sec_bus;
254 int err;
255
256 err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
257 if (err)
258 return -EINVAL;
259
260 return sprintf(buf, "%u\n", sec_bus);
261}
262static DEVICE_ATTR_RO(secondary_bus_number);
263
264static ssize_t subordinate_bus_number_show(struct device *dev,
265 struct device_attribute *attr,
266 char *buf)
267{
268 struct pci_dev *pci_dev = to_pci_dev(dev);
269 u8 sub_bus;
270 int err;
271
272 err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
273 if (err)
274 return -EINVAL;
275
276 return sprintf(buf, "%u\n", sub_bus);
277}
278static DEVICE_ATTR_RO(subordinate_bus_number);
279
157static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, 280static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
158 char *buf) 281 char *buf)
159{ 282{
@@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev,
472 const char *buf, size_t count) 595 const char *buf, size_t count)
473{ 596{
474 struct pci_dev *pdev = to_pci_dev(dev); 597 struct pci_dev *pdev = to_pci_dev(dev);
475 struct pci_sriov *iov = pdev->sriov;
476 int ret; 598 int ret;
477 u16 num_vfs; 599 u16 num_vfs;
478 600
@@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
483 if (num_vfs > pci_sriov_get_totalvfs(pdev)) 605 if (num_vfs > pci_sriov_get_totalvfs(pdev))
484 return -ERANGE; 606 return -ERANGE;
485 607
486 mutex_lock(&iov->dev->sriov->lock); 608 device_lock(&pdev->dev);
487 609
488 if (num_vfs == pdev->sriov->num_VFs) 610 if (num_vfs == pdev->sriov->num_VFs)
489 goto exit; 611 goto exit;
@@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
518 num_vfs, ret); 640 num_vfs, ret);
519 641
520exit: 642exit:
521 mutex_unlock(&iov->dev->sriov->lock); 643 device_unlock(&pdev->dev);
522 644
523 if (ret < 0) 645 if (ret < 0)
524 return ret; 646 return ret;
@@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = {
629 NULL, 751 NULL,
630}; 752};
631 753
632static const struct attribute_group pci_dev_group = { 754static struct attribute *pci_bridge_attrs[] = {
633 .attrs = pci_dev_attrs, 755 &dev_attr_subordinate_bus_number.attr,
756 &dev_attr_secondary_bus_number.attr,
757 NULL,
634}; 758};
635 759
636const struct attribute_group *pci_dev_groups[] = { 760static struct attribute *pcie_dev_attrs[] = {
637 &pci_dev_group, 761 &dev_attr_current_link_speed.attr,
762 &dev_attr_current_link_width.attr,
763 &dev_attr_max_link_width.attr,
764 &dev_attr_max_link_speed.attr,
638 NULL, 765 NULL,
639}; 766};
640 767
@@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1557 return a->mode; 1684 return a->mode;
1558} 1685}
1559 1686
1687static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1688 struct attribute *a, int n)
1689{
1690 struct device *dev = kobj_to_dev(kobj);
1691 struct pci_dev *pdev = to_pci_dev(dev);
1692
1693 if (pci_is_bridge(pdev))
1694 return a->mode;
1695
1696 return 0;
1697}
1698
1699static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1700 struct attribute *a, int n)
1701{
1702 struct device *dev = kobj_to_dev(kobj);
1703 struct pci_dev *pdev = to_pci_dev(dev);
1704
1705 if (pci_is_pcie(pdev))
1706 return a->mode;
1707
1708 return 0;
1709}
1710
1711static const struct attribute_group pci_dev_group = {
1712 .attrs = pci_dev_attrs,
1713};
1714
1715const struct attribute_group *pci_dev_groups[] = {
1716 &pci_dev_group,
1717 NULL,
1718};
1719
1720static const struct attribute_group pci_bridge_group = {
1721 .attrs = pci_bridge_attrs,
1722};
1723
1724const struct attribute_group *pci_bridge_groups[] = {
1725 &pci_bridge_group,
1726 NULL,
1727};
1728
1729static const struct attribute_group pcie_dev_group = {
1730 .attrs = pcie_dev_attrs,
1731};
1732
1733const struct attribute_group *pcie_dev_groups[] = {
1734 &pcie_dev_group,
1735 NULL,
1736};
1737
1560static struct attribute_group pci_dev_hp_attr_group = { 1738static struct attribute_group pci_dev_hp_attr_group = {
1561 .attrs = pci_dev_hp_attrs, 1739 .attrs = pci_dev_hp_attrs,
1562 .is_visible = pci_dev_hp_attrs_are_visible, 1740 .is_visible = pci_dev_hp_attrs_are_visible,
@@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = {
1592 .is_visible = pci_dev_attrs_are_visible, 1770 .is_visible = pci_dev_attrs_are_visible,
1593}; 1771};
1594 1772
1773static struct attribute_group pci_bridge_attr_group = {
1774 .attrs = pci_bridge_attrs,
1775 .is_visible = pci_bridge_attrs_are_visible,
1776};
1777
1778static struct attribute_group pcie_dev_attr_group = {
1779 .attrs = pcie_dev_attrs,
1780 .is_visible = pcie_dev_attrs_are_visible,
1781};
1782
1595static const struct attribute_group *pci_dev_attr_groups[] = { 1783static const struct attribute_group *pci_dev_attr_groups[] = {
1596 &pci_dev_attr_group, 1784 &pci_dev_attr_group,
1597 &pci_dev_hp_attr_group, 1785 &pci_dev_hp_attr_group,
1598#ifdef CONFIG_PCI_IOV 1786#ifdef CONFIG_PCI_IOV
1599 &sriov_dev_attr_group, 1787 &sriov_dev_attr_group,
1600#endif 1788#endif
1789 &pci_bridge_attr_group,
1790 &pcie_dev_attr_group,
1601 NULL, 1791 NULL,
1602}; 1792};
1603 1793
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b01bd5bba8e6..34b5fe025dd4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -28,6 +28,7 @@
28#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29#include <linux/pci_hotplug.h> 29#include <linux/pci_hotplug.h>
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <linux/pci-ats.h>
31#include <asm/setup.h> 32#include <asm/setup.h>
32#include <asm/dma.h> 33#include <asm/dma.h>
33#include <linux/aer.h> 34#include <linux/aer.h>
@@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
455 pci_bus_for_each_resource(bus, r, i) { 456 pci_bus_for_each_resource(bus, r, i) {
456 if (!r) 457 if (!r)
457 continue; 458 continue;
458 if (res->start && resource_contains(r, res)) { 459 if (resource_contains(r, res)) {
459 460
460 /* 461 /*
461 * If the window is prefetchable but the BAR is 462 * If the window is prefetchable but the BAR is
@@ -1173,6 +1174,8 @@ void pci_restore_state(struct pci_dev *dev)
1173 1174
1174 /* PCI Express register must be restored first */ 1175 /* PCI Express register must be restored first */
1175 pci_restore_pcie_state(dev); 1176 pci_restore_pcie_state(dev);
1177 pci_restore_pasid_state(dev);
1178 pci_restore_pri_state(dev);
1176 pci_restore_ats_state(dev); 1179 pci_restore_ats_state(dev);
1177 pci_restore_vc_state(dev); 1180 pci_restore_vc_state(dev);
1178 1181
@@ -1960,12 +1963,13 @@ EXPORT_SYMBOL(pci_wake_from_d3);
1960/** 1963/**
1961 * pci_target_state - find an appropriate low power state for a given PCI dev 1964 * pci_target_state - find an appropriate low power state for a given PCI dev
1962 * @dev: PCI device 1965 * @dev: PCI device
1966 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
1963 * 1967 *
1964 * Use underlying platform code to find a supported low power state for @dev. 1968 * Use underlying platform code to find a supported low power state for @dev.
1965 * If the platform can't manage @dev, return the deepest state from which it 1969 * If the platform can't manage @dev, return the deepest state from which it
1966 * can generate wake events, based on any available PME info. 1970 * can generate wake events, based on any available PME info.
1967 */ 1971 */
1968static pci_power_t pci_target_state(struct pci_dev *dev) 1972static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
1969{ 1973{
1970 pci_power_t target_state = PCI_D3hot; 1974 pci_power_t target_state = PCI_D3hot;
1971 1975
@@ -2002,7 +2006,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
2002 if (dev->current_state == PCI_D3cold) 2006 if (dev->current_state == PCI_D3cold)
2003 target_state = PCI_D3cold; 2007 target_state = PCI_D3cold;
2004 2008
2005 if (device_may_wakeup(&dev->dev)) { 2009 if (wakeup) {
2006 /* 2010 /*
2007 * Find the deepest state from which the device can generate 2011 * Find the deepest state from which the device can generate
2008 * wake-up events, make it the target state and enable device 2012 * wake-up events, make it the target state and enable device
@@ -2028,13 +2032,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
2028 */ 2032 */
2029int pci_prepare_to_sleep(struct pci_dev *dev) 2033int pci_prepare_to_sleep(struct pci_dev *dev)
2030{ 2034{
2031 pci_power_t target_state = pci_target_state(dev); 2035 bool wakeup = device_may_wakeup(&dev->dev);
2036 pci_power_t target_state = pci_target_state(dev, wakeup);
2032 int error; 2037 int error;
2033 2038
2034 if (target_state == PCI_POWER_ERROR) 2039 if (target_state == PCI_POWER_ERROR)
2035 return -EIO; 2040 return -EIO;
2036 2041
2037 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 2042 pci_enable_wake(dev, target_state, wakeup);
2038 2043
2039 error = pci_set_power_state(dev, target_state); 2044 error = pci_set_power_state(dev, target_state);
2040 2045
@@ -2067,9 +2072,10 @@ EXPORT_SYMBOL(pci_back_from_sleep);
2067 */ 2072 */
2068int pci_finish_runtime_suspend(struct pci_dev *dev) 2073int pci_finish_runtime_suspend(struct pci_dev *dev)
2069{ 2074{
2070 pci_power_t target_state = pci_target_state(dev); 2075 pci_power_t target_state;
2071 int error; 2076 int error;
2072 2077
2078 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2073 if (target_state == PCI_POWER_ERROR) 2079 if (target_state == PCI_POWER_ERROR)
2074 return -EIO; 2080 return -EIO;
2075 2081
@@ -2105,8 +2111,8 @@ bool pci_dev_run_wake(struct pci_dev *dev)
2105 if (!dev->pme_support) 2111 if (!dev->pme_support)
2106 return false; 2112 return false;
2107 2113
2108 /* PME-capable in principle, but not from the intended sleep state */ 2114 /* PME-capable in principle, but not from the target power state */
2109 if (!pci_pme_capable(dev, pci_target_state(dev))) 2115 if (!pci_pme_capable(dev, pci_target_state(dev, false)))
2110 return false; 2116 return false;
2111 2117
2112 while (bus->parent) { 2118 while (bus->parent) {
@@ -2141,10 +2147,12 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2141bool pci_dev_keep_suspended(struct pci_dev *pci_dev) 2147bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2142{ 2148{
2143 struct device *dev = &pci_dev->dev; 2149 struct device *dev = &pci_dev->dev;
2150 bool wakeup = device_may_wakeup(dev);
2144 2151
2145 if (!pm_runtime_suspended(dev) 2152 if (!pm_runtime_suspended(dev)
2146 || pci_target_state(pci_dev) != pci_dev->current_state 2153 || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
2147 || platform_pci_need_resume(pci_dev)) 2154 || platform_pci_need_resume(pci_dev)
2155 || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
2148 return false; 2156 return false;
2149 2157
2150 /* 2158 /*
@@ -2160,7 +2168,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2160 spin_lock_irq(&dev->power.lock); 2168 spin_lock_irq(&dev->power.lock);
2161 2169
2162 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && 2170 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2163 !device_may_wakeup(dev)) 2171 !wakeup)
2164 __pci_pme_active(pci_dev, false); 2172 __pci_pme_active(pci_dev, false);
2165 2173
2166 spin_unlock_irq(&dev->power.lock); 2174 spin_unlock_irq(&dev->power.lock);
@@ -3708,46 +3716,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
3708} 3716}
3709EXPORT_SYMBOL_GPL(pci_intx); 3717EXPORT_SYMBOL_GPL(pci_intx);
3710 3718
3711/**
3712 * pci_intx_mask_supported - probe for INTx masking support
3713 * @dev: the PCI device to operate on
3714 *
3715 * Check if the device dev support INTx masking via the config space
3716 * command word.
3717 */
3718bool pci_intx_mask_supported(struct pci_dev *dev)
3719{
3720 bool mask_supported = false;
3721 u16 orig, new;
3722
3723 if (dev->broken_intx_masking)
3724 return false;
3725
3726 pci_cfg_access_lock(dev);
3727
3728 pci_read_config_word(dev, PCI_COMMAND, &orig);
3729 pci_write_config_word(dev, PCI_COMMAND,
3730 orig ^ PCI_COMMAND_INTX_DISABLE);
3731 pci_read_config_word(dev, PCI_COMMAND, &new);
3732
3733 /*
3734 * There's no way to protect against hardware bugs or detect them
3735 * reliably, but as long as we know what the value should be, let's
3736 * go ahead and check it.
3737 */
3738 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
3739 dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3740 orig, new);
3741 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3742 mask_supported = true;
3743 pci_write_config_word(dev, PCI_COMMAND, orig);
3744 }
3745
3746 pci_cfg_access_unlock(dev);
3747 return mask_supported;
3748}
3749EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3750
3751static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) 3719static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3752{ 3720{
3753 struct pci_bus *bus = dev->bus; 3721 struct pci_bus *bus = dev->bus;
@@ -3798,7 +3766,7 @@ done:
3798 * @dev: the PCI device to operate on 3766 * @dev: the PCI device to operate on
3799 * 3767 *
3800 * Check if the device dev has its INTx line asserted, mask it and 3768 * Check if the device dev has its INTx line asserted, mask it and
3801 * return true in that case. False is returned if not interrupt was 3769 * return true in that case. False is returned if no interrupt was
3802 * pending. 3770 * pending.
3803 */ 3771 */
3804bool pci_check_and_mask_intx(struct pci_dev *dev) 3772bool pci_check_and_mask_intx(struct pci_dev *dev)
@@ -4068,40 +4036,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4068 return pci_reset_hotplug_slot(dev->slot->hotplug, probe); 4036 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4069} 4037}
4070 4038
4071static int __pci_dev_reset(struct pci_dev *dev, int probe)
4072{
4073 int rc;
4074
4075 might_sleep();
4076
4077 rc = pci_dev_specific_reset(dev, probe);
4078 if (rc != -ENOTTY)
4079 goto done;
4080
4081 if (pcie_has_flr(dev)) {
4082 if (!probe)
4083 pcie_flr(dev);
4084 rc = 0;
4085 goto done;
4086 }
4087
4088 rc = pci_af_flr(dev, probe);
4089 if (rc != -ENOTTY)
4090 goto done;
4091
4092 rc = pci_pm_reset(dev, probe);
4093 if (rc != -ENOTTY)
4094 goto done;
4095
4096 rc = pci_dev_reset_slot_function(dev, probe);
4097 if (rc != -ENOTTY)
4098 goto done;
4099
4100 rc = pci_parent_bus_reset(dev, probe);
4101done:
4102 return rc;
4103}
4104
4105static void pci_dev_lock(struct pci_dev *dev) 4039static void pci_dev_lock(struct pci_dev *dev)
4106{ 4040{
4107 pci_cfg_access_lock(dev); 4041 pci_cfg_access_lock(dev);
@@ -4127,26 +4061,18 @@ static void pci_dev_unlock(struct pci_dev *dev)
4127 pci_cfg_access_unlock(dev); 4061 pci_cfg_access_unlock(dev);
4128} 4062}
4129 4063
4130/** 4064static void pci_dev_save_and_disable(struct pci_dev *dev)
4131 * pci_reset_notify - notify device driver of reset
4132 * @dev: device to be notified of reset
4133 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
4134 * completed
4135 *
4136 * Must be called prior to device access being disabled and after device
4137 * access is restored.
4138 */
4139static void pci_reset_notify(struct pci_dev *dev, bool prepare)
4140{ 4065{
4141 const struct pci_error_handlers *err_handler = 4066 const struct pci_error_handlers *err_handler =
4142 dev->driver ? dev->driver->err_handler : NULL; 4067 dev->driver ? dev->driver->err_handler : NULL;
4143 if (err_handler && err_handler->reset_notify)
4144 err_handler->reset_notify(dev, prepare);
4145}
4146 4068
4147static void pci_dev_save_and_disable(struct pci_dev *dev) 4069 /*
4148{ 4070 * dev->driver->err_handler->reset_prepare() is protected against
4149 pci_reset_notify(dev, true); 4071 * races with ->remove() by the device lock, which must be held by
4072 * the caller.
4073 */
4074 if (err_handler && err_handler->reset_prepare)
4075 err_handler->reset_prepare(dev);
4150 4076
4151 /* 4077 /*
4152 * Wake-up device prior to save. PM registers default to D0 after 4078 * Wake-up device prior to save. PM registers default to D0 after
@@ -4168,23 +4094,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
4168 4094
4169static void pci_dev_restore(struct pci_dev *dev) 4095static void pci_dev_restore(struct pci_dev *dev)
4170{ 4096{
4171 pci_restore_state(dev); 4097 const struct pci_error_handlers *err_handler =
4172 pci_reset_notify(dev, false); 4098 dev->driver ? dev->driver->err_handler : NULL;
4173}
4174
4175static int pci_dev_reset(struct pci_dev *dev, int probe)
4176{
4177 int rc;
4178
4179 if (!probe)
4180 pci_dev_lock(dev);
4181
4182 rc = __pci_dev_reset(dev, probe);
4183 4099
4184 if (!probe) 4100 pci_restore_state(dev);
4185 pci_dev_unlock(dev);
4186 4101
4187 return rc; 4102 /*
4103 * dev->driver->err_handler->reset_done() is protected against
4104 * races with ->remove() by the device lock, which must be held by
4105 * the caller.
4106 */
4107 if (err_handler && err_handler->reset_done)
4108 err_handler->reset_done(dev);
4188} 4109}
4189 4110
4190/** 4111/**
@@ -4206,7 +4127,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
4206 */ 4127 */
4207int __pci_reset_function(struct pci_dev *dev) 4128int __pci_reset_function(struct pci_dev *dev)
4208{ 4129{
4209 return pci_dev_reset(dev, 0); 4130 int ret;
4131
4132 pci_dev_lock(dev);
4133 ret = __pci_reset_function_locked(dev);
4134 pci_dev_unlock(dev);
4135
4136 return ret;
4210} 4137}
4211EXPORT_SYMBOL_GPL(__pci_reset_function); 4138EXPORT_SYMBOL_GPL(__pci_reset_function);
4212 4139
@@ -4231,7 +4158,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function);
4231 */ 4158 */
4232int __pci_reset_function_locked(struct pci_dev *dev) 4159int __pci_reset_function_locked(struct pci_dev *dev)
4233{ 4160{
4234 return __pci_dev_reset(dev, 0); 4161 int rc;
4162
4163 might_sleep();
4164
4165 rc = pci_dev_specific_reset(dev, 0);
4166 if (rc != -ENOTTY)
4167 return rc;
4168 if (pcie_has_flr(dev)) {
4169 pcie_flr(dev);
4170 return 0;
4171 }
4172 rc = pci_af_flr(dev, 0);
4173 if (rc != -ENOTTY)
4174 return rc;
4175 rc = pci_pm_reset(dev, 0);
4176 if (rc != -ENOTTY)
4177 return rc;
4178 rc = pci_dev_reset_slot_function(dev, 0);
4179 if (rc != -ENOTTY)
4180 return rc;
4181 return pci_parent_bus_reset(dev, 0);
4235} 4182}
4236EXPORT_SYMBOL_GPL(__pci_reset_function_locked); 4183EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4237 4184
@@ -4248,7 +4195,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4248 */ 4195 */
4249int pci_probe_reset_function(struct pci_dev *dev) 4196int pci_probe_reset_function(struct pci_dev *dev)
4250{ 4197{
4251 return pci_dev_reset(dev, 1); 4198 int rc;
4199
4200 might_sleep();
4201
4202 rc = pci_dev_specific_reset(dev, 1);
4203 if (rc != -ENOTTY)
4204 return rc;
4205 if (pcie_has_flr(dev))
4206 return 0;
4207 rc = pci_af_flr(dev, 1);
4208 if (rc != -ENOTTY)
4209 return rc;
4210 rc = pci_pm_reset(dev, 1);
4211 if (rc != -ENOTTY)
4212 return rc;
4213 rc = pci_dev_reset_slot_function(dev, 1);
4214 if (rc != -ENOTTY)
4215 return rc;
4216
4217 return pci_parent_bus_reset(dev, 1);
4252} 4218}
4253 4219
4254/** 4220/**
@@ -4271,15 +4237,17 @@ int pci_reset_function(struct pci_dev *dev)
4271{ 4237{
4272 int rc; 4238 int rc;
4273 4239
4274 rc = pci_dev_reset(dev, 1); 4240 rc = pci_probe_reset_function(dev);
4275 if (rc) 4241 if (rc)
4276 return rc; 4242 return rc;
4277 4243
4244 pci_dev_lock(dev);
4278 pci_dev_save_and_disable(dev); 4245 pci_dev_save_and_disable(dev);
4279 4246
4280 rc = pci_dev_reset(dev, 0); 4247 rc = __pci_reset_function_locked(dev);
4281 4248
4282 pci_dev_restore(dev); 4249 pci_dev_restore(dev);
4250 pci_dev_unlock(dev);
4283 4251
4284 return rc; 4252 return rc;
4285} 4253}
@@ -4295,20 +4263,18 @@ int pci_try_reset_function(struct pci_dev *dev)
4295{ 4263{
4296 int rc; 4264 int rc;
4297 4265
4298 rc = pci_dev_reset(dev, 1); 4266 rc = pci_probe_reset_function(dev);
4299 if (rc) 4267 if (rc)
4300 return rc; 4268 return rc;
4301 4269
4302 pci_dev_save_and_disable(dev); 4270 if (!pci_dev_trylock(dev))
4271 return -EAGAIN;
4303 4272
4304 if (pci_dev_trylock(dev)) { 4273 pci_dev_save_and_disable(dev);
4305 rc = __pci_dev_reset(dev, 0); 4274 rc = __pci_reset_function_locked(dev);
4306 pci_dev_unlock(dev); 4275 pci_dev_unlock(dev);
4307 } else
4308 rc = -EAGAIN;
4309 4276
4310 pci_dev_restore(dev); 4277 pci_dev_restore(dev);
4311
4312 return rc; 4278 return rc;
4313} 4279}
4314EXPORT_SYMBOL_GPL(pci_try_reset_function); 4280EXPORT_SYMBOL_GPL(pci_try_reset_function);
@@ -4458,7 +4424,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus)
4458 struct pci_dev *dev; 4424 struct pci_dev *dev;
4459 4425
4460 list_for_each_entry(dev, &bus->devices, bus_list) { 4426 list_for_each_entry(dev, &bus->devices, bus_list) {
4427 pci_dev_lock(dev);
4461 pci_dev_save_and_disable(dev); 4428 pci_dev_save_and_disable(dev);
4429 pci_dev_unlock(dev);
4462 if (dev->subordinate) 4430 if (dev->subordinate)
4463 pci_bus_save_and_disable(dev->subordinate); 4431 pci_bus_save_and_disable(dev->subordinate);
4464 } 4432 }
@@ -4473,7 +4441,9 @@ static void pci_bus_restore(struct pci_bus *bus)
4473 struct pci_dev *dev; 4441 struct pci_dev *dev;
4474 4442
4475 list_for_each_entry(dev, &bus->devices, bus_list) { 4443 list_for_each_entry(dev, &bus->devices, bus_list) {
4444 pci_dev_lock(dev);
4476 pci_dev_restore(dev); 4445 pci_dev_restore(dev);
4446 pci_dev_unlock(dev);
4477 if (dev->subordinate) 4447 if (dev->subordinate)
4478 pci_bus_restore(dev->subordinate); 4448 pci_bus_restore(dev->subordinate);
4479 } 4449 }
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f8113e5b9812..93f4044b8f4b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -272,7 +272,6 @@ struct pci_sriov {
272 u16 driver_max_VFs; /* max num VFs driver supports */ 272 u16 driver_max_VFs; /* max num VFs driver supports */
273 struct pci_dev *dev; /* lowest numbered PF */ 273 struct pci_dev *dev; /* lowest numbered PF */
274 struct pci_dev *self; /* this PF */ 274 struct pci_dev *self; /* this PF */
275 struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
276 resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ 275 resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
277 bool drivers_autoprobe; /* auto probing of VFs by driver */ 276 bool drivers_autoprobe; /* auto probing of VFs by driver */
278}; 277};
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 77d2ca99d2ec..c39f32e42b4d 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context)
92 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); 92 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
93 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID, 93 pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
94 &source); 94 &source);
95 if (!status) 95 if (!status || status == (u16)(~0))
96 return IRQ_NONE; 96 return IRQ_NONE;
97 97
98 dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", 98 dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
@@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev)
144 144
145 dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT); 145 dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
146 146
147 ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; 147 ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
148 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); 148 pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
149 149
150 dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", 150 dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 587aef36030d..4334fd5d7de9 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -13,10 +13,11 @@
13 13
14#define PCIE_PORT_DEVICE_MAXSERVICES 5 14#define PCIE_PORT_DEVICE_MAXSERVICES 5
15/* 15/*
16 * According to the PCI Express Base Specification 2.0, the indices of 16 * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
17 * the MSI-X table entries used by port services must not exceed 31 17 * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI
18 * supports a maximum of 32 vectors per function.
18 */ 19 */
19#define PCIE_PORT_MAX_MSIX_ENTRIES 32 20#define PCIE_PORT_MAX_MSI_ENTRIES 32
20 21
21#define get_descriptor_id(type, service) (((type - 4) << 8) | service) 22#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
22 23
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index cea504f6f478..313a21df1692 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev)
44} 44}
45 45
46/** 46/**
47 * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port 47 * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
48 * for given port
48 * @dev: PCI Express port to handle 49 * @dev: PCI Express port to handle
49 * @irqs: Array of interrupt vectors to populate 50 * @irqs: Array of interrupt vectors to populate
50 * @mask: Bitmask of port capabilities returned by get_port_device_capability() 51 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
51 * 52 *
52 * Return value: 0 on success, error code on failure 53 * Return value: 0 on success, error code on failure
53 */ 54 */
54static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) 55static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
55{ 56{
56 int nr_entries, entry, nvec = 0; 57 int nr_entries, entry, nvec = 0;
57 58
@@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
61 * equal to the number of entries this port actually uses, we'll happily 62 * equal to the number of entries this port actually uses, we'll happily
62 * go through without any tricks. 63 * go through without any tricks.
63 */ 64 */
64 nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES, 65 nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
65 PCI_IRQ_MSIX); 66 PCI_IRQ_MSIX | PCI_IRQ_MSI);
66 if (nr_entries < 0) 67 if (nr_entries < 0)
67 return nr_entries; 68 return nr_entries;
68 69
@@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
70 u16 reg16; 71 u16 reg16;
71 72
72 /* 73 /*
73 * The code below follows the PCI Express Base Specification 2.0 74 * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
74 * stating in Section 6.1.6 that "PME and Hot-Plug Event 75 * interrupts (when both are implemented) always share the
75 * interrupts (when both are implemented) always share the same 76 * same MSI or MSI-X vector, as indicated by the Interrupt
76 * MSI or MSI-X vector, as indicated by the Interrupt Message 77 * Message Number field in the PCI Express Capabilities
77 * Number field in the PCI Express Capabilities register", where 78 * register".
78 * according to Section 7.8.2 of the specification "For MSI-X, 79 *
79 * the value in this field indicates which MSI-X Table entry is 80 * Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
80 * used to generate the interrupt message." 81 * indicates the offset between the base Message Data and
82 * the interrupt message that is generated."
83 *
84 * "For MSI-X, the [Interrupt Message Number] indicates
85 * which MSI-X Table entry is used to generate the
86 * interrupt message."
81 */ 87 */
82 pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16); 88 pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
83 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; 89 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
@@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
94 u32 reg32, pos; 100 u32 reg32, pos;
95 101
96 /* 102 /*
97 * The code below follows Section 7.10.10 of the PCI Express 103 * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
98 * Base Specification 2.0 stating that bits 31-27 of the Root 104 * Message Number in the Root Error Status register
99 * Error Status Register contain a value indicating which of the 105 * indicates which MSI/MSI-X vector is used for AER.
100 * MSI/MSI-X vectors assigned to the port is going to be used 106 *
101 * for AER, where "For MSI-X, the value in this register 107 * "For MSI, the [Advanced Error Interrupt Message Number]
102 * indicates which MSI-X Table entry is used to generate the 108 * indicates the offset between the base Message Data and
103 * interrupt message." 109 * the interrupt message that is generated."
110 *
111 * "For MSI-X, the [Advanced Error Interrupt Message
112 * Number] indicates which MSI-X Table entry is used to
113 * generate the interrupt message."
104 */ 114 */
105 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 115 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
106 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32); 116 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
@@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
113 nvec = max(nvec, entry + 1); 123 nvec = max(nvec, entry + 1);
114 } 124 }
115 125
126 if (mask & PCIE_PORT_SERVICE_DPC) {
127 u16 reg16, pos;
128
129 /*
130 * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
131 * Message Number in the DPC Capability register indicates
132 * which MSI/MSI-X vector is used for DPC.
133 *
134 * "For MSI, the [DPC Interrupt Message Number] indicates
135 * the offset between the base Message Data and the
136 * interrupt message that is generated."
137 *
138 * "For MSI-X, the [DPC Interrupt Message Number] indicates
139 * which MSI-X Table entry is used to generate the
140 * interrupt message."
141 */
142 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
143 pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
144 entry = reg16 & 0x1f;
145 if (entry >= nr_entries)
146 goto out_free_irqs;
147
148 irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
149
150 nvec = max(nvec, entry + 1);
151 }
152
116 /* 153 /*
117 * If nvec is equal to the allocated number of entries, we can just use 154 * If nvec is equal to the allocated number of entries, we can just use
118 * what we have. Otherwise, the port has some extra entries not for the 155 * what we have. Otherwise, the port has some extra entries not for the
@@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
124 161
125 /* Now allocate the MSI-X vectors for real */ 162 /* Now allocate the MSI-X vectors for real */
126 nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, 163 nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
127 PCI_IRQ_MSIX); 164 PCI_IRQ_MSIX | PCI_IRQ_MSI);
128 if (nr_entries < 0) 165 if (nr_entries < 0)
129 return nr_entries; 166 return nr_entries;
130 } 167 }
@@ -146,26 +183,29 @@ out_free_irqs:
146 */ 183 */
147static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) 184static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
148{ 185{
149 unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
150 int ret, i; 186 int ret, i;
151 187
152 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) 188 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
153 irqs[i] = -1; 189 irqs[i] = -1;
154 190
155 /* 191 /*
156 * If MSI cannot be used for PCIe PME or hotplug, we have to use 192 * If we support PME or hotplug, but we can't use MSI/MSI-X for
157 * INTx or other interrupts, e.g. system shared interrupt. 193 * them, we have to fall back to INTx or other interrupts, e.g., a
194 * system shared interrupt.
158 */ 195 */
159 if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || 196 if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
160 ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { 197 goto legacy_irq;
161 flags &= ~PCI_IRQ_MSI; 198
162 } else { 199 if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())
163 /* Try to use MSI-X if supported */ 200 goto legacy_irq;
164 if (!pcie_port_enable_msix(dev, irqs, mask)) 201
165 return 0; 202 /* Try to use MSI-X or MSI if supported */
166 } 203 if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
204 return 0;
167 205
168 ret = pci_alloc_irq_vectors(dev, 1, 1, flags); 206legacy_irq:
207 /* fall back to legacy IRQ */
208 ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
169 if (ret < 0) 209 if (ret < 0)
170 return -ENODEV; 210 return -ENODEV;
171 211
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index bd42ed42c199..c31310db0404 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1356,6 +1356,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1356} 1356}
1357 1357
1358/** 1358/**
1359 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1360 * @dev: PCI device
1361 *
1362 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
1363 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1364 */
1365static int pci_intx_mask_broken(struct pci_dev *dev)
1366{
1367 u16 orig, toggle, new;
1368
1369 pci_read_config_word(dev, PCI_COMMAND, &orig);
1370 toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1371 pci_write_config_word(dev, PCI_COMMAND, toggle);
1372 pci_read_config_word(dev, PCI_COMMAND, &new);
1373
1374 pci_write_config_word(dev, PCI_COMMAND, orig);
1375
1376 /*
1377 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1378 * r2.3, so strictly speaking, a device is not *broken* if it's not
1379 * writable. But we'll live with the misnomer for now.
1380 */
1381 if (new != toggle)
1382 return 1;
1383 return 0;
1384}
1385
1386/**
1359 * pci_setup_device - fill in class and map information of a device 1387 * pci_setup_device - fill in class and map information of a device
1360 * @dev: the device structure to fill 1388 * @dev: the device structure to fill
1361 * 1389 *
@@ -1425,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev)
1425 } 1453 }
1426 } 1454 }
1427 1455
1456 dev->broken_intx_masking = pci_intx_mask_broken(dev);
1457
1428 switch (dev->hdr_type) { /* header type */ 1458 switch (dev->hdr_type) { /* header type */
1429 case PCI_HEADER_TYPE_NORMAL: /* standard header */ 1459 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1430 if (class == PCI_CLASS_BRIDGE_PCI) 1460 if (class == PCI_CLASS_BRIDGE_PCI)
@@ -1700,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1700 /* Initialize Advanced Error Capabilities and Control Register */ 1730 /* Initialize Advanced Error Capabilities and Control Register */
1701 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32); 1731 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1702 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; 1732 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1733 /* Don't enable ECRC generation or checking if unsupported */
1734 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
1735 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
1736 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
1737 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
1703 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); 1738 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1704 1739
1705 /* 1740 /*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 085fb787aa9e..6967c6b4cf6b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
304{ 304{
305 int i; 305 int i;
306 306
307 for (i = 0; i < PCI_STD_RESOURCE_END; i++) { 307 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
308 struct resource *r = &dev->resource[i]; 308 struct resource *r = &dev->resource[i];
309 309
310 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { 310 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
1684DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); 1684DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
1685DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); 1685DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1686 1686
1687static void quirk_radeon_pm(struct pci_dev *dev)
1688{
1689 if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1690 dev->subsystem_device == 0x00e2) {
1691 if (dev->d3_delay < 20) {
1692 dev->d3_delay = 20;
1693 dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n",
1694 dev->d3_delay);
1695 }
1696 }
1697}
1698DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1699
1687#ifdef CONFIG_X86_IO_APIC 1700#ifdef CONFIG_X86_IO_APIC
1688static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) 1701static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
1689{ 1702{
@@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
3236 quirk_broken_intx_masking); 3249 quirk_broken_intx_masking);
3237DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, 3250DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
3238 quirk_broken_intx_masking); 3251 quirk_broken_intx_masking);
3252DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
3253 quirk_broken_intx_masking);
3254DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
3255 quirk_broken_intx_masking);
3239DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, 3256DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
3240 quirk_broken_intx_masking); 3257 quirk_broken_intx_masking);
3241DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, 3258DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index cc6e085008fb..af81b2dec42e 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -120,6 +120,13 @@ struct sw_event_regs {
120 u32 reserved16[4]; 120 u32 reserved16[4];
121} __packed; 121} __packed;
122 122
123enum {
124 SWITCHTEC_CFG0_RUNNING = 0x04,
125 SWITCHTEC_CFG1_RUNNING = 0x05,
126 SWITCHTEC_IMG0_RUNNING = 0x03,
127 SWITCHTEC_IMG1_RUNNING = 0x07,
128};
129
123struct sys_info_regs { 130struct sys_info_regs {
124 u32 device_id; 131 u32 device_id;
125 u32 device_version; 132 u32 device_version;
@@ -129,7 +136,9 @@ struct sys_info_regs {
129 u32 table_format_version; 136 u32 table_format_version;
130 u32 partition_id; 137 u32 partition_id;
131 u32 cfg_file_fmt_version; 138 u32 cfg_file_fmt_version;
132 u32 reserved2[58]; 139 u16 cfg_running;
140 u16 img_running;
141 u32 reserved2[57];
133 char vendor_id[8]; 142 char vendor_id[8];
134 char product_id[16]; 143 char product_id[16];
135 char product_revision[4]; 144 char product_revision[4];
@@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
807{ 816{
808 struct switchtec_ioctl_flash_part_info info = {0}; 817 struct switchtec_ioctl_flash_part_info info = {0};
809 struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; 818 struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
819 struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
810 u32 active_addr = -1; 820 u32 active_addr = -1;
811 821
812 if (copy_from_user(&info, uinfo, sizeof(info))) 822 if (copy_from_user(&info, uinfo, sizeof(info)))
@@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
816 case SWITCHTEC_IOCTL_PART_CFG0: 826 case SWITCHTEC_IOCTL_PART_CFG0:
817 active_addr = ioread32(&fi->active_cfg); 827 active_addr = ioread32(&fi->active_cfg);
818 set_fw_info_part(&info, &fi->cfg0); 828 set_fw_info_part(&info, &fi->cfg0);
829 if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
830 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
819 break; 831 break;
820 case SWITCHTEC_IOCTL_PART_CFG1: 832 case SWITCHTEC_IOCTL_PART_CFG1:
821 active_addr = ioread32(&fi->active_cfg); 833 active_addr = ioread32(&fi->active_cfg);
822 set_fw_info_part(&info, &fi->cfg1); 834 set_fw_info_part(&info, &fi->cfg1);
835 if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
836 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
823 break; 837 break;
824 case SWITCHTEC_IOCTL_PART_IMG0: 838 case SWITCHTEC_IOCTL_PART_IMG0:
825 active_addr = ioread32(&fi->active_img); 839 active_addr = ioread32(&fi->active_img);
826 set_fw_info_part(&info, &fi->img0); 840 set_fw_info_part(&info, &fi->img0);
841 if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
842 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
827 break; 843 break;
828 case SWITCHTEC_IOCTL_PART_IMG1: 844 case SWITCHTEC_IOCTL_PART_IMG1:
829 active_addr = ioread32(&fi->active_img); 845 active_addr = ioread32(&fi->active_img);
830 set_fw_info_part(&info, &fi->img1); 846 set_fw_info_part(&info, &fi->img1);
847 if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
848 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
831 break; 849 break;
832 case SWITCHTEC_IOCTL_PART_NVLOG: 850 case SWITCHTEC_IOCTL_PART_NVLOG:
833 set_fw_info_part(&info, &fi->nvlog); 851 set_fw_info_part(&info, &fi->nvlog);
@@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
861 } 879 }
862 880
863 if (info.address == active_addr) 881 if (info.address == active_addr)
864 info.active = 1; 882 info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
865 883
866 if (copy_to_user(uinfo, &info, sizeof(info))) 884 if (copy_to_user(uinfo, &info, sizeof(info)))
867 return -EFAULT; 885 return -EFAULT;
@@ -1291,7 +1309,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
1291 cdev = &stdev->cdev; 1309 cdev = &stdev->cdev;
1292 cdev_init(cdev, &switchtec_fops); 1310 cdev_init(cdev, &switchtec_fops);
1293 cdev->owner = THIS_MODULE; 1311 cdev->owner = THIS_MODULE;
1294 cdev->kobj.parent = &dev->kobj;
1295 1312
1296 return stdev; 1313 return stdev;
1297 1314
@@ -1442,12 +1459,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
1442 stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; 1459 stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
1443 stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; 1460 stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
1444 stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; 1461 stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
1445 stdev->partition = ioread8(&stdev->mmio_ntb->partition_id); 1462 stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
1446 stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); 1463 stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
1447 stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; 1464 stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
1448 stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; 1465 stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
1449 stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; 1466 stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
1450 1467
1468 if (stdev->partition_count < 1)
1469 stdev->partition_count = 1;
1470
1451 init_pff(stdev); 1471 init_pff(stdev);
1452 1472
1453 pci_set_drvdata(pdev, stdev); 1473 pci_set_drvdata(pdev, stdev);
@@ -1479,11 +1499,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
1479 SWITCHTEC_EVENT_EN_IRQ, 1499 SWITCHTEC_EVENT_EN_IRQ,
1480 &stdev->mmio_part_cfg->mrpc_comp_hdr); 1500 &stdev->mmio_part_cfg->mrpc_comp_hdr);
1481 1501
1482 rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1); 1502 rc = cdev_device_add(&stdev->cdev, &stdev->dev);
1483 if (rc)
1484 goto err_put;
1485
1486 rc = device_add(&stdev->dev);
1487 if (rc) 1503 if (rc)
1488 goto err_devadd; 1504 goto err_devadd;
1489 1505
@@ -1492,7 +1508,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
1492 return 0; 1508 return 0;
1493 1509
1494err_devadd: 1510err_devadd:
1495 cdev_del(&stdev->cdev);
1496 stdev_kill(stdev); 1511 stdev_kill(stdev);
1497err_put: 1512err_put:
1498 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); 1513 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
@@ -1506,8 +1521,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
1506 1521
1507 pci_set_drvdata(pdev, NULL); 1522 pci_set_drvdata(pdev, NULL);
1508 1523
1509 device_del(&stdev->dev); 1524 cdev_device_del(&stdev->cdev, &stdev->dev);
1510 cdev_del(&stdev->cdev);
1511 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); 1525 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
1512 dev_info(&stdev->dev, "unregistered.\n"); 1526 dev_info(&stdev->dev, "unregistered.\n");
1513 1527
@@ -1544,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
1544 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 1558 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
1545 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 1559 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
1546 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3 1560 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
1561 SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3
1562 SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3
1563 SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3
1564 SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3
1565 SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3
1566 SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3
1567 SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3
1568 SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3
1569 SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3
1570 SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3
1571 SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3
1572 SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3
1573 SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3
1574 SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3
1575 SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3
1576 SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3
1577 SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3
1578 SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3
1547 {0} 1579 {0}
1548}; 1580};
1549MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); 1581MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index b827a8113e26..ff01bed7112f 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -408,7 +408,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
408 if (!base) 408 if (!base)
409 return; 409 return;
410 410
411 for (i = 0; i < PCI_STD_RESOURCE_END; i++) { 411 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
412 struct resource *res = &dev->resource[i]; 412 struct resource *res = &dev->resource[i];
413 413
414 if (!(res->flags & IORESOURCE_MEM)) 414 if (!(res->flags & IORESOURCE_MEM))
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a6fba4804672..0991f973f8ca 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -291,7 +291,7 @@ extern int
291irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 291irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
292 292
293struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); 293struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
294int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd); 294int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
295 295
296#else /* CONFIG_SMP */ 296#else /* CONFIG_SMP */
297 297
@@ -331,7 +331,7 @@ irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
331} 331}
332 332
333static inline int 333static inline int
334irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) 334irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
335{ 335{
336 return maxvec; 336 return maxvec;
337} 337}
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 57e0b8250947..782fb8e0755f 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -7,6 +7,7 @@
7 7
8int pci_enable_pri(struct pci_dev *pdev, u32 reqs); 8int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
9void pci_disable_pri(struct pci_dev *pdev); 9void pci_disable_pri(struct pci_dev *pdev);
10void pci_restore_pri_state(struct pci_dev *pdev);
10int pci_reset_pri(struct pci_dev *pdev); 11int pci_reset_pri(struct pci_dev *pdev);
11 12
12#else /* CONFIG_PCI_PRI */ 13#else /* CONFIG_PCI_PRI */
@@ -20,6 +21,10 @@ static inline void pci_disable_pri(struct pci_dev *pdev)
20{ 21{
21} 22}
22 23
24static inline void pci_restore_pri_state(struct pci_dev *pdev)
25{
26}
27
23static inline int pci_reset_pri(struct pci_dev *pdev) 28static inline int pci_reset_pri(struct pci_dev *pdev)
24{ 29{
25 return -ENODEV; 30 return -ENODEV;
@@ -31,6 +36,7 @@ static inline int pci_reset_pri(struct pci_dev *pdev)
31 36
32int pci_enable_pasid(struct pci_dev *pdev, int features); 37int pci_enable_pasid(struct pci_dev *pdev, int features);
33void pci_disable_pasid(struct pci_dev *pdev); 38void pci_disable_pasid(struct pci_dev *pdev);
39void pci_restore_pasid_state(struct pci_dev *pdev);
34int pci_pasid_features(struct pci_dev *pdev); 40int pci_pasid_features(struct pci_dev *pdev);
35int pci_max_pasids(struct pci_dev *pdev); 41int pci_max_pasids(struct pci_dev *pdev);
36 42
@@ -45,6 +51,10 @@ static inline void pci_disable_pasid(struct pci_dev *pdev)
45{ 51{
46} 52}
47 53
54static inline void pci_restore_pasid_state(struct pci_dev *pdev)
55{
56}
57
48static inline int pci_pasid_features(struct pci_dev *pdev) 58static inline int pci_pasid_features(struct pci_dev *pdev)
49{ 59{
50 return -EINVAL; 60 return -EINVAL;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5c1c0ae38dd3..bd68ed11b394 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -183,6 +183,11 @@ enum pci_dev_flags {
183 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), 183 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
184 /* Do not use FLR even if device advertises PCI_AF_CAP */ 184 /* Do not use FLR even if device advertises PCI_AF_CAP */
185 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), 185 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
186 /*
187 * Resume before calling the driver's system suspend hooks, disabling
188 * the direct_complete optimization.
189 */
190 PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
186}; 191};
187 192
188enum pci_irq_reroute_variant { 193enum pci_irq_reroute_variant {
@@ -356,6 +361,8 @@ struct pci_dev {
356 unsigned int msix_enabled:1; 361 unsigned int msix_enabled:1;
357 unsigned int ari_enabled:1; /* ARI forwarding */ 362 unsigned int ari_enabled:1; /* ARI forwarding */
358 unsigned int ats_enabled:1; /* Address Translation Service */ 363 unsigned int ats_enabled:1; /* Address Translation Service */
364 unsigned int pasid_enabled:1; /* Process Address Space ID */
365 unsigned int pri_enabled:1; /* Page Request Interface */
359 unsigned int is_managed:1; 366 unsigned int is_managed:1;
360 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 367 unsigned int needs_freset:1; /* Dev requires fundamental reset */
361 unsigned int state_saved:1; 368 unsigned int state_saved:1;
@@ -366,7 +373,7 @@ struct pci_dev {
366 unsigned int is_thunderbolt:1; /* Thunderbolt controller */ 373 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
367 unsigned int __aer_firmware_first_valid:1; 374 unsigned int __aer_firmware_first_valid:1;
368 unsigned int __aer_firmware_first:1; 375 unsigned int __aer_firmware_first:1;
369 unsigned int broken_intx_masking:1; 376 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
370 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 377 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
371 unsigned int irq_managed:1; 378 unsigned int irq_managed:1;
372 unsigned int has_secondary_link:1; 379 unsigned int has_secondary_link:1;
@@ -399,6 +406,12 @@ struct pci_dev {
399 u8 ats_stu; /* ATS Smallest Translation Unit */ 406 u8 ats_stu; /* ATS Smallest Translation Unit */
400 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ 407 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
401#endif 408#endif
409#ifdef CONFIG_PCI_PRI
410 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
411#endif
412#ifdef CONFIG_PCI_PASID
413 u16 pasid_features;
414#endif
402 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ 415 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
403 size_t romlen; /* Length of ROM if it's not from the BAR */ 416 size_t romlen; /* Length of ROM if it's not from the BAR */
404 char *driver_override; /* Driver name to force a match */ 417 char *driver_override; /* Driver name to force a match */
@@ -694,7 +707,8 @@ struct pci_error_handlers {
694 pci_ers_result_t (*slot_reset)(struct pci_dev *dev); 707 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
695 708
696 /* PCI function reset prepare or completed */ 709 /* PCI function reset prepare or completed */
697 void (*reset_notify)(struct pci_dev *dev, bool prepare); 710 void (*reset_prepare)(struct pci_dev *dev);
711 void (*reset_done)(struct pci_dev *dev);
698 712
699 /* Device driver may resume normal operations */ 713 /* Device driver may resume normal operations */
700 void (*resume)(struct pci_dev *dev); 714 void (*resume)(struct pci_dev *dev);
@@ -1004,6 +1018,15 @@ int __must_check pci_reenable_device(struct pci_dev *);
1004int __must_check pcim_enable_device(struct pci_dev *pdev); 1018int __must_check pcim_enable_device(struct pci_dev *pdev);
1005void pcim_pin_device(struct pci_dev *pdev); 1019void pcim_pin_device(struct pci_dev *pdev);
1006 1020
1021static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1022{
1023 /*
1024 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1025 * writable and no quirk has marked the feature broken.
1026 */
1027 return !pdev->broken_intx_masking;
1028}
1029
1007static inline int pci_is_enabled(struct pci_dev *pdev) 1030static inline int pci_is_enabled(struct pci_dev *pdev)
1008{ 1031{
1009 return (atomic_read(&pdev->enable_cnt) > 0); 1032 return (atomic_read(&pdev->enable_cnt) > 0);
@@ -1027,7 +1050,6 @@ int __must_check pci_set_mwi(struct pci_dev *dev);
1027int pci_try_set_mwi(struct pci_dev *dev); 1050int pci_try_set_mwi(struct pci_dev *dev);
1028void pci_clear_mwi(struct pci_dev *dev); 1051void pci_clear_mwi(struct pci_dev *dev);
1029void pci_intx(struct pci_dev *dev, int enable); 1052void pci_intx(struct pci_dev *dev, int enable);
1030bool pci_intx_mask_supported(struct pci_dev *dev);
1031bool pci_check_and_mask_intx(struct pci_dev *dev); 1053bool pci_check_and_mask_intx(struct pci_dev *dev);
1032bool pci_check_and_unmask_intx(struct pci_dev *dev); 1054bool pci_check_and_unmask_intx(struct pci_dev *dev);
1033int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); 1055int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index d56bb0051009..c22d3ebaca20 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -517,6 +517,7 @@
517#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ 517#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
518#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ 518#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
519#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ 519#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
520#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
520#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ 521#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
521#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ 522#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
522#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ 523#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 3e824e1a6495..5e392968bad2 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -39,6 +39,9 @@ struct switchtec_ioctl_flash_info {
39 __u32 padding; 39 __u32 padding;
40}; 40};
41 41
42#define SWITCHTEC_IOCTL_PART_ACTIVE 1
43#define SWITCHTEC_IOCTL_PART_RUNNING 2
44
42struct switchtec_ioctl_flash_part_info { 45struct switchtec_ioctl_flash_part_info {
43 __u32 flash_partition; 46 __u32 flash_partition;
44 __u32 address; 47 __u32 address;
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e2d356dd7581..9b71406d2eec 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -66,6 +66,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
66 struct cpumask *masks; 66 struct cpumask *masks;
67 cpumask_var_t nmsk; 67 cpumask_var_t nmsk;
68 68
69 /*
70 * If there aren't any vectors left after applying the pre/post
71 * vectors don't bother with assigning affinity.
72 */
73 if (!affv)
74 return NULL;
75
69 if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) 76 if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
70 return NULL; 77 return NULL;
71 78
@@ -140,15 +147,19 @@ out:
140 147
141/** 148/**
142 * irq_calc_affinity_vectors - Calculate the optimal number of vectors 149 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
150 * @minvec: The minimum number of vectors available
143 * @maxvec: The maximum number of vectors available 151 * @maxvec: The maximum number of vectors available
144 * @affd: Description of the affinity requirements 152 * @affd: Description of the affinity requirements
145 */ 153 */
146int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) 154int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
147{ 155{
148 int resv = affd->pre_vectors + affd->post_vectors; 156 int resv = affd->pre_vectors + affd->post_vectors;
149 int vecs = maxvec - resv; 157 int vecs = maxvec - resv;
150 int cpus; 158 int cpus;
151 159
160 if (resv > minvec)
161 return 0;
162
152 /* Stabilize the cpumasks */ 163 /* Stabilize the cpumasks */
153 get_online_cpus(); 164 get_online_cpus();
154 cpus = cpumask_weight(cpu_online_mask); 165 cpus = cpumask_weight(cpu_online_mask);