diff options
Diffstat (limited to 'drivers/pci/controller/dwc')
21 files changed, 9904 insertions, 0 deletions
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig new file mode 100644 index 000000000000..16f52c626b4b --- /dev/null +++ b/drivers/pci/controller/dwc/Kconfig | |||
@@ -0,0 +1,197 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | menu "DesignWare PCI Core Support" | ||
4 | depends on PCI | ||
5 | |||
6 | config PCIE_DW | ||
7 | bool | ||
8 | |||
9 | config PCIE_DW_HOST | ||
10 | bool | ||
11 | depends on PCI_MSI_IRQ_DOMAIN | ||
12 | select PCIE_DW | ||
13 | |||
14 | config PCIE_DW_EP | ||
15 | bool | ||
16 | depends on PCI_ENDPOINT | ||
17 | select PCIE_DW | ||
18 | |||
19 | config PCI_DRA7XX | ||
20 | bool | ||
21 | |||
22 | config PCI_DRA7XX_HOST | ||
23 | bool "TI DRA7xx PCIe controller Host Mode" | ||
24 | depends on SOC_DRA7XX || COMPILE_TEST | ||
25 | depends on PCI_MSI_IRQ_DOMAIN | ||
26 | depends on OF && HAS_IOMEM && TI_PIPE3 | ||
27 | select PCIE_DW_HOST | ||
28 | select PCI_DRA7XX | ||
29 | default y | ||
30 | help | ||
31 | Enables support for the PCIe controller in the DRA7xx SoC to work in | ||
32 | host mode. There are two instances of PCIe controller in DRA7xx. | ||
33 | This controller can work either as EP or RC. In order to enable | ||
34 | host-specific features PCI_DRA7XX_HOST must be selected and in order | ||
35 | to enable device-specific features PCI_DRA7XX_EP must be selected. | ||
36 | This uses the DesignWare core. | ||
37 | |||
38 | config PCI_DRA7XX_EP | ||
39 | bool "TI DRA7xx PCIe controller Endpoint Mode" | ||
40 | depends on SOC_DRA7XX || COMPILE_TEST | ||
41 | depends on PCI_ENDPOINT | ||
42 | depends on OF && HAS_IOMEM && TI_PIPE3 | ||
43 | select PCIE_DW_EP | ||
44 | select PCI_DRA7XX | ||
45 | help | ||
46 | Enables support for the PCIe controller in the DRA7xx SoC to work in | ||
47 | endpoint mode. There are two instances of PCIe controller in DRA7xx. | ||
48 | This controller can work either as EP or RC. In order to enable | ||
49 | host-specific features PCI_DRA7XX_HOST must be selected and in order | ||
50 | to enable device-specific features PCI_DRA7XX_EP must be selected. | ||
51 | This uses the DesignWare core. | ||
52 | |||
53 | config PCIE_DW_PLAT | ||
54 | bool | ||
55 | |||
56 | config PCIE_DW_PLAT_HOST | ||
57 | bool "Platform bus based DesignWare PCIe Controller - Host mode" | ||
58 | depends on PCI && PCI_MSI_IRQ_DOMAIN | ||
59 | select PCIE_DW_HOST | ||
60 | select PCIE_DW_PLAT | ||
61 | default y | ||
62 | help | ||
63 | Enables support for the PCIe controller in the DesignWare IP to | ||
64 | work in host mode. There are two instances of PCIe controller in | ||
65 | DesignWare IP. | ||
66 | This controller can work either as EP or RC. In order to enable | ||
67 | host-specific features PCIE_DW_PLAT_HOST must be selected and in | ||
68 | order to enable device-specific features PCIE_DW_PLAT_EP must be | ||
69 | selected. | ||
70 | |||
71 | config PCIE_DW_PLAT_EP | ||
72 | bool "Platform bus based DesignWare PCIe Controller - Endpoint mode" | ||
73 | depends on PCI && PCI_MSI_IRQ_DOMAIN | ||
74 | depends on PCI_ENDPOINT | ||
75 | select PCIE_DW_EP | ||
76 | select PCIE_DW_PLAT | ||
77 | help | ||
78 | Enables support for the PCIe controller in the DesignWare IP to | ||
79 | work in endpoint mode. There are two instances of PCIe controller | ||
80 | in DesignWare IP. | ||
81 | This controller can work either as EP or RC. In order to enable | ||
82 | host-specific features PCIE_DW_PLAT_HOST must be selected and in | ||
83 | order to enable device-specific features PCIE_DW_PLAT_EP must be | ||
84 | selected. | ||
85 | |||
86 | config PCI_EXYNOS | ||
87 | bool "Samsung Exynos PCIe controller" | ||
88 | depends on SOC_EXYNOS5440 || COMPILE_TEST | ||
89 | depends on PCI_MSI_IRQ_DOMAIN | ||
90 | select PCIE_DW_HOST | ||
91 | |||
92 | config PCI_IMX6 | ||
93 | bool "Freescale i.MX6 PCIe controller" | ||
94 | depends on SOC_IMX6Q || (ARM && COMPILE_TEST) | ||
95 | depends on PCI_MSI_IRQ_DOMAIN | ||
96 | select PCIE_DW_HOST | ||
97 | |||
98 | config PCIE_SPEAR13XX | ||
99 | bool "STMicroelectronics SPEAr PCIe controller" | ||
100 | depends on ARCH_SPEAR13XX || COMPILE_TEST | ||
101 | depends on PCI_MSI_IRQ_DOMAIN | ||
102 | select PCIE_DW_HOST | ||
103 | help | ||
104 | Say Y here if you want PCIe support on SPEAr13XX SoCs. | ||
105 | |||
106 | config PCI_KEYSTONE | ||
107 | bool "TI Keystone PCIe controller" | ||
108 | depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) | ||
109 | depends on PCI_MSI_IRQ_DOMAIN | ||
110 | select PCIE_DW_HOST | ||
111 | help | ||
112 | Say Y here if you want to enable PCI controller support on Keystone | ||
113 | SoCs. The PCI controller on Keystone is based on DesignWare hardware | ||
114 | and therefore the driver re-uses the DesignWare core functions to | ||
115 | implement the driver. | ||
116 | |||
117 | config PCI_LAYERSCAPE | ||
118 | bool "Freescale Layerscape PCIe controller" | ||
119 | depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) | ||
120 | depends on PCI_MSI_IRQ_DOMAIN | ||
121 | select MFD_SYSCON | ||
122 | select PCIE_DW_HOST | ||
123 | help | ||
124 | Say Y here if you want PCIe controller support on Layerscape SoCs. | ||
125 | |||
126 | config PCI_HISI | ||
127 | depends on OF && (ARM64 || COMPILE_TEST) | ||
128 | bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" | ||
129 | depends on PCI_MSI_IRQ_DOMAIN | ||
130 | select PCIE_DW_HOST | ||
131 | select PCI_HOST_COMMON | ||
132 | help | ||
133 | Say Y here if you want PCIe controller support on HiSilicon | ||
134 | Hip05 and Hip06 SoCs | ||
135 | |||
136 | config PCIE_QCOM | ||
137 | bool "Qualcomm PCIe controller" | ||
138 | depends on OF && (ARCH_QCOM || COMPILE_TEST) | ||
139 | depends on PCI_MSI_IRQ_DOMAIN | ||
140 | select PCIE_DW_HOST | ||
141 | help | ||
142 | Say Y here to enable PCIe controller support on Qualcomm SoCs. The | ||
143 | PCIe controller uses the DesignWare core plus Qualcomm-specific | ||
144 | hardware wrappers. | ||
145 | |||
146 | config PCIE_ARMADA_8K | ||
147 | bool "Marvell Armada-8K PCIe controller" | ||
148 | depends on ARCH_MVEBU || COMPILE_TEST | ||
149 | depends on PCI_MSI_IRQ_DOMAIN | ||
150 | select PCIE_DW_HOST | ||
151 | help | ||
152 | Say Y here if you want to enable PCIe controller support on | ||
153 | Armada-8K SoCs. The PCIe controller on Armada-8K is based on | ||
154 | DesignWare hardware and therefore the driver re-uses the | ||
155 | DesignWare core functions to implement the driver. | ||
156 | |||
157 | config PCIE_ARTPEC6 | ||
158 | bool | ||
159 | |||
160 | config PCIE_ARTPEC6_HOST | ||
161 | bool "Axis ARTPEC-6 PCIe controller Host Mode" | ||
162 | depends on MACH_ARTPEC6 || COMPILE_TEST | ||
163 | depends on PCI_MSI_IRQ_DOMAIN | ||
164 | select PCIE_DW_HOST | ||
165 | select PCIE_ARTPEC6 | ||
166 | help | ||
167 | Enables support for the PCIe controller in the ARTPEC-6 SoC to work in | ||
168 | host mode. This uses the DesignWare core. | ||
169 | |||
170 | config PCIE_ARTPEC6_EP | ||
171 | bool "Axis ARTPEC-6 PCIe controller Endpoint Mode" | ||
172 | depends on MACH_ARTPEC6 || COMPILE_TEST | ||
173 | depends on PCI_ENDPOINT | ||
174 | select PCIE_DW_EP | ||
175 | select PCIE_ARTPEC6 | ||
176 | help | ||
177 | Enables support for the PCIe controller in the ARTPEC-6 SoC to work in | ||
178 | endpoint mode. This uses the DesignWare core. | ||
179 | |||
180 | config PCIE_KIRIN | ||
181 | depends on OF && (ARM64 || COMPILE_TEST) | ||
182 | bool "HiSilicon Kirin series SoCs PCIe controllers" | ||
183 | depends on PCI_MSI_IRQ_DOMAIN | ||
184 | select PCIE_DW_HOST | ||
185 | help | ||
186 | Say Y here if you want PCIe controller support | ||
187 | on HiSilicon Kirin series SoCs. | ||
188 | |||
189 | config PCIE_HISI_STB | ||
190 | bool "HiSilicon STB SoCs PCIe controllers" | ||
191 | depends on ARCH_HISI || COMPILE_TEST | ||
192 | depends on PCI_MSI_IRQ_DOMAIN | ||
193 | select PCIE_DW_HOST | ||
194 | help | ||
195 | Say Y here if you want PCIe controller support on HiSilicon STB SoCs. | ||
196 | |||
197 | endmenu | ||
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile new file mode 100644 index 000000000000..5d2ce72c7a52 --- /dev/null +++ b/drivers/pci/controller/dwc/Makefile | |||
@@ -0,0 +1,30 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | obj-$(CONFIG_PCIE_DW) += pcie-designware.o | ||
3 | obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o | ||
4 | obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o | ||
5 | obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o | ||
6 | obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o | ||
7 | obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o | ||
8 | obj-$(CONFIG_PCI_IMX6) += pci-imx6.o | ||
9 | obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o | ||
10 | obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o | ||
11 | obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o | ||
12 | obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o | ||
13 | obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o | ||
14 | obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o | ||
15 | obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o | ||
16 | obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o | ||
17 | |||
18 | # The following drivers are for devices that use the generic ACPI | ||
19 | # pci_root.c driver but don't support standard ECAM config access. | ||
20 | # They contain MCFG quirks to replace the generic ECAM accessors with | ||
21 | # device-specific ones that are shared with the DT driver. | ||
22 | |||
23 | # The ACPI driver is generic and should not require driver-specific | ||
24 | # config options to be enabled, so we always build these drivers on | ||
25 | # ARM64 and use internal ifdefs to only build the pieces we need | ||
26 | # depending on whether ACPI, the DT driver, or both are enabled. | ||
27 | |||
28 | ifdef CONFIG_PCI | ||
29 | obj-$(CONFIG_ARM64) += pcie-hisi.o | ||
30 | endif | ||
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c new file mode 100644 index 000000000000..cfaeef81d868 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-dra7xx.c | |||
@@ -0,0 +1,846 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs | ||
4 | * | ||
5 | * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com | ||
6 | * | ||
7 | * Authors: Kishon Vijay Abraham I <kishon@ti.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include <linux/device.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/irqdomain.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/of_device.h> | ||
19 | #include <linux/of_gpio.h> | ||
20 | #include <linux/of_pci.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/phy/phy.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/pm_runtime.h> | ||
25 | #include <linux/resource.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/mfd/syscon.h> | ||
28 | #include <linux/regmap.h> | ||
29 | |||
30 | #include "../../pci.h" | ||
31 | #include "pcie-designware.h" | ||
32 | |||
33 | /* PCIe controller wrapper DRA7XX configuration registers */ | ||
34 | |||
35 | #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 | ||
36 | #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 | ||
37 | #define ERR_SYS BIT(0) | ||
38 | #define ERR_FATAL BIT(1) | ||
39 | #define ERR_NONFATAL BIT(2) | ||
40 | #define ERR_COR BIT(3) | ||
41 | #define ERR_AXI BIT(4) | ||
42 | #define ERR_ECRC BIT(5) | ||
43 | #define PME_TURN_OFF BIT(8) | ||
44 | #define PME_TO_ACK BIT(9) | ||
45 | #define PM_PME BIT(10) | ||
46 | #define LINK_REQ_RST BIT(11) | ||
47 | #define LINK_UP_EVT BIT(12) | ||
48 | #define CFG_BME_EVT BIT(13) | ||
49 | #define CFG_MSE_EVT BIT(14) | ||
50 | #define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ | ||
51 | ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ | ||
52 | LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) | ||
53 | |||
54 | #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 | ||
55 | #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 | ||
56 | #define INTA BIT(0) | ||
57 | #define INTB BIT(1) | ||
58 | #define INTC BIT(2) | ||
59 | #define INTD BIT(3) | ||
60 | #define MSI BIT(4) | ||
61 | #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) | ||
62 | |||
63 | #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 | ||
64 | #define DEVICE_TYPE_EP 0x0 | ||
65 | #define DEVICE_TYPE_LEG_EP 0x1 | ||
66 | #define DEVICE_TYPE_RC 0x4 | ||
67 | |||
68 | #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 | ||
69 | #define LTSSM_EN 0x1 | ||
70 | |||
71 | #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C | ||
72 | #define LINK_UP BIT(16) | ||
73 | #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF | ||
74 | |||
75 | #define EXP_CAP_ID_OFFSET 0x70 | ||
76 | |||
77 | #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 | ||
78 | #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 | ||
79 | |||
80 | #define PCIECTRL_TI_CONF_MSI_XMT 0x012c | ||
81 | #define MSI_REQ_GRANT BIT(0) | ||
82 | #define MSI_VECTOR_SHIFT 7 | ||
83 | |||
84 | struct dra7xx_pcie { | ||
85 | struct dw_pcie *pci; | ||
86 | void __iomem *base; /* DT ti_conf */ | ||
87 | int phy_count; /* DT phy-names count */ | ||
88 | struct phy **phy; | ||
89 | int link_gen; | ||
90 | struct irq_domain *irq_domain; | ||
91 | enum dw_pcie_device_mode mode; | ||
92 | }; | ||
93 | |||
94 | struct dra7xx_pcie_of_data { | ||
95 | enum dw_pcie_device_mode mode; | ||
96 | }; | ||
97 | |||
98 | #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) | ||
99 | |||
/* Read a 32-bit register of the TI configuration wrapper ("ti_conf"). */
static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

/* Write @value to a 32-bit register of the TI configuration wrapper. */
static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}
110 | |||
/*
 * dw_pcie_ops::cpu_addr_fixup - mask a CPU address down to the low 28 bits
 * (DRA7XX_CPU_TO_BUS_ADDR) before it is programmed into the ATU, matching
 * what the DRA7xx interconnect presents to the controller.
 */
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

/* dw_pcie_ops::link_up - report link state from the wrapper PHY_CS register. */
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

/* dw_pcie_ops::stop_link - halt link training by clearing LTSSM_EN. */
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}
133 | |||
/*
 * dw_pcie_ops::start_link - start link training, optionally capped at Gen1.
 *
 * When the DT "max-link-speed" resolved to 1, both the advertised link
 * capability (LNKCAP) and the target link speed (LNKCTL2) are clamped to
 * 2.5 GT/s before LTSSM_EN is set.  Returns 0 even if the link was already
 * up (only logged).
 */
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;	/* fixed PCIe cap offset on DRA7xx */

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		/*
		 * NOTE(review): LNKCTL2 is masked with PCI_EXP_LNKCAP_SLS;
		 * the Target Link Speed field occupies the same bit
		 * positions so the result is correct, but
		 * PCI_EXP_LNKCTL2_TLS would be the precise mask — confirm
		 * before touching this.
		 */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	/* Kick off link training in the wrapper. */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}
172 | |||
173 | static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) | ||
174 | { | ||
175 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, | ||
176 | LEG_EP_INTERRUPTS | MSI); | ||
177 | |||
178 | dra7xx_pcie_writel(dra7xx, | ||
179 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, | ||
180 | MSI | LEG_EP_INTERRUPTS); | ||
181 | } | ||
182 | |||
183 | static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) | ||
184 | { | ||
185 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, | ||
186 | INTERRUPTS); | ||
187 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, | ||
188 | INTERRUPTS); | ||
189 | } | ||
190 | |||
191 | static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) | ||
192 | { | ||
193 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | ||
194 | dra7xx_pcie_enable_msi_interrupts(dra7xx); | ||
195 | } | ||
196 | |||
/*
 * dw_pcie_host_ops::host_init - bring up root-complex mode: program the
 * DesignWare RC registers, start link training, set up MSI and unmask the
 * wrapper interrupts.
 */
static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	/* NOTE(review): wait result deliberately ignored — link-up appears
	 * to be best effort so the host comes up without a device; confirm. */
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};
215 | |||
/* irq_domain map hook: back each INTx virq with a dummy chip and simple flow. */
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,	/* standard INTx DT specifier decode */
};
229 | |||
230 | static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) | ||
231 | { | ||
232 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
233 | struct device *dev = pci->dev; | ||
234 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
235 | struct device_node *node = dev->of_node; | ||
236 | struct device_node *pcie_intc_node = of_get_next_child(node, NULL); | ||
237 | |||
238 | if (!pcie_intc_node) { | ||
239 | dev_err(dev, "No PCIe Intc node found\n"); | ||
240 | return -ENODEV; | ||
241 | } | ||
242 | |||
243 | dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | ||
244 | &intx_domain_ops, pp); | ||
245 | if (!dra7xx->irq_domain) { | ||
246 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | ||
247 | return -ENODEV; | ||
248 | } | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
/*
 * Handler for the combined MSI/INTx wrapper interrupt.
 *
 * NOTE(review): the switch matches the *raw* status word, so only cases
 * where exactly one event bit is set (MSI alone, or a single INTx) are
 * dispatched; simultaneous events fall through undispatched and are then
 * acked below — confirm this matches the hardware's reporting model.
 */
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	unsigned long reg;
	u32 virq, bit;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		/* Demux the INTx bit into a virq from the linear domain. */
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	/* Ack exactly the status bits observed above. */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}
283 | |||
/*
 * Handler for the "main" wrapper interrupt: decodes error, power-management
 * and configuration events, logging each at debug level.  In endpoint mode
 * a LINK_UP_EVT is additionally forwarded to the DesignWare EP core.
 */
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		/* Notify the EP function driver that the host trained the link. */
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	/* Ack exactly the status bits observed above. */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
342 | |||
/* dw_pcie_ep_ops::ep_init - reset all six BARs and unmask wrapper events. */
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

/*
 * Pulse legacy INTx toward the RC: assert, hold for 1 ms so the host can
 * observe it, then deassert.
 */
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

/* Request transmission of MSI @interrupt_num (1-based) via the wrapper. */
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	/* Vector field is 0-based; MSI_REQ_GRANT triggers the send. */
	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}
371 | |||
/*
 * dw_pcie_ep_ops::raise_irq - signal an interrupt to the RC.  Only legacy
 * INTx and MSI are supported by the DRA7xx wrapper; unknown types are
 * logged and ignored (the return value stays 0 in every case).
 */
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};
396 | |||
/*
 * Set up endpoint mode: map the "ep_dbics" and "ep_dbics2" register spaces,
 * record the outbound "addr_space" window and register with the DesignWare
 * endpoint core.  Returns 0 or a negative errno.
 */
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Second DBI register window used by the DW EP core. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	/* Outbound window the EP core carves into per-function regions. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}
434 | |||
/*
 * Set up host (RC) mode: request the combined MSI/INTx interrupt (the
 * second platform IRQ resource), create the INTx domain, map the
 * "rc_dbics" register space and register with the DesignWare host core.
 * Returns 0 or a negative errno.
 */
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi",	dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
477 | |||
/* Callbacks the DesignWare core uses for DRA7xx-specific behavior. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};
484 | |||
485 | static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) | ||
486 | { | ||
487 | int phy_count = dra7xx->phy_count; | ||
488 | |||
489 | while (phy_count--) { | ||
490 | phy_power_off(dra7xx->phy[phy_count]); | ||
491 | phy_exit(dra7xx->phy[phy_count]); | ||
492 | } | ||
493 | } | ||
494 | |||
495 | static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) | ||
496 | { | ||
497 | int phy_count = dra7xx->phy_count; | ||
498 | int ret; | ||
499 | int i; | ||
500 | |||
501 | for (i = 0; i < phy_count; i++) { | ||
502 | ret = phy_init(dra7xx->phy[i]); | ||
503 | if (ret < 0) | ||
504 | goto err_phy; | ||
505 | |||
506 | ret = phy_power_on(dra7xx->phy[i]); | ||
507 | if (ret < 0) { | ||
508 | phy_exit(dra7xx->phy[i]); | ||
509 | goto err_phy; | ||
510 | } | ||
511 | } | ||
512 | |||
513 | return 0; | ||
514 | |||
515 | err_phy: | ||
516 | while (--i >= 0) { | ||
517 | phy_power_off(dra7xx->phy[i]); | ||
518 | phy_exit(dra7xx->phy[i]); | ||
519 | } | ||
520 | |||
521 | return ret; | ||
522 | } | ||
523 | |||
/* Per-compatible match data selecting root-complex vs endpoint mode. */
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};
543 | |||
/**
 * dra7xx_pcie_ep_unaligned_memaccess() - workaround for AM572x/AM571x
 *					  Errata i870
 * @dev: device whose DT node carries "ti,syscon-unaligned-access"
 *
 * Access to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	/* Property layout: <&syscon reg_offset bit_mask> (two fixed args). */
	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	/* Set the mask bits at the given offset to enable legacy mode. */
	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}
585 | |||
586 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) | ||
587 | { | ||
588 | u32 reg; | ||
589 | int ret; | ||
590 | int irq; | ||
591 | int i; | ||
592 | int phy_count; | ||
593 | struct phy **phy; | ||
594 | struct device_link **link; | ||
595 | void __iomem *base; | ||
596 | struct resource *res; | ||
597 | struct dw_pcie *pci; | ||
598 | struct dra7xx_pcie *dra7xx; | ||
599 | struct device *dev = &pdev->dev; | ||
600 | struct device_node *np = dev->of_node; | ||
601 | char name[10]; | ||
602 | struct gpio_desc *reset; | ||
603 | const struct of_device_id *match; | ||
604 | const struct dra7xx_pcie_of_data *data; | ||
605 | enum dw_pcie_device_mode mode; | ||
606 | |||
607 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); | ||
608 | if (!match) | ||
609 | return -EINVAL; | ||
610 | |||
611 | data = (struct dra7xx_pcie_of_data *)match->data; | ||
612 | mode = (enum dw_pcie_device_mode)data->mode; | ||
613 | |||
614 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); | ||
615 | if (!dra7xx) | ||
616 | return -ENOMEM; | ||
617 | |||
618 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
619 | if (!pci) | ||
620 | return -ENOMEM; | ||
621 | |||
622 | pci->dev = dev; | ||
623 | pci->ops = &dw_pcie_ops; | ||
624 | |||
625 | irq = platform_get_irq(pdev, 0); | ||
626 | if (irq < 0) { | ||
627 | dev_err(dev, "missing IRQ resource: %d\n", irq); | ||
628 | return irq; | ||
629 | } | ||
630 | |||
631 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); | ||
632 | base = devm_ioremap_nocache(dev, res->start, resource_size(res)); | ||
633 | if (!base) | ||
634 | return -ENOMEM; | ||
635 | |||
636 | phy_count = of_property_count_strings(np, "phy-names"); | ||
637 | if (phy_count < 0) { | ||
638 | dev_err(dev, "unable to find the strings\n"); | ||
639 | return phy_count; | ||
640 | } | ||
641 | |||
642 | phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); | ||
643 | if (!phy) | ||
644 | return -ENOMEM; | ||
645 | |||
646 | link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); | ||
647 | if (!link) | ||
648 | return -ENOMEM; | ||
649 | |||
650 | for (i = 0; i < phy_count; i++) { | ||
651 | snprintf(name, sizeof(name), "pcie-phy%d", i); | ||
652 | phy[i] = devm_phy_get(dev, name); | ||
653 | if (IS_ERR(phy[i])) | ||
654 | return PTR_ERR(phy[i]); | ||
655 | |||
656 | link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); | ||
657 | if (!link[i]) { | ||
658 | ret = -EINVAL; | ||
659 | goto err_link; | ||
660 | } | ||
661 | } | ||
662 | |||
663 | dra7xx->base = base; | ||
664 | dra7xx->phy = phy; | ||
665 | dra7xx->pci = pci; | ||
666 | dra7xx->phy_count = phy_count; | ||
667 | |||
668 | ret = dra7xx_pcie_enable_phy(dra7xx); | ||
669 | if (ret) { | ||
670 | dev_err(dev, "failed to enable phy\n"); | ||
671 | return ret; | ||
672 | } | ||
673 | |||
674 | platform_set_drvdata(pdev, dra7xx); | ||
675 | |||
676 | pm_runtime_enable(dev); | ||
677 | ret = pm_runtime_get_sync(dev); | ||
678 | if (ret < 0) { | ||
679 | dev_err(dev, "pm_runtime_get_sync failed\n"); | ||
680 | goto err_get_sync; | ||
681 | } | ||
682 | |||
683 | reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); | ||
684 | if (IS_ERR(reset)) { | ||
685 | ret = PTR_ERR(reset); | ||
686 | dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); | ||
687 | goto err_gpio; | ||
688 | } | ||
689 | |||
690 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | ||
691 | reg &= ~LTSSM_EN; | ||
692 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | ||
693 | |||
694 | dra7xx->link_gen = of_pci_get_max_link_speed(np); | ||
695 | if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) | ||
696 | dra7xx->link_gen = 2; | ||
697 | |||
698 | switch (mode) { | ||
699 | case DW_PCIE_RC_TYPE: | ||
700 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { | ||
701 | ret = -ENODEV; | ||
702 | goto err_gpio; | ||
703 | } | ||
704 | |||
705 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, | ||
706 | DEVICE_TYPE_RC); | ||
707 | ret = dra7xx_add_pcie_port(dra7xx, pdev); | ||
708 | if (ret < 0) | ||
709 | goto err_gpio; | ||
710 | break; | ||
711 | case DW_PCIE_EP_TYPE: | ||
712 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { | ||
713 | ret = -ENODEV; | ||
714 | goto err_gpio; | ||
715 | } | ||
716 | |||
717 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, | ||
718 | DEVICE_TYPE_EP); | ||
719 | |||
720 | ret = dra7xx_pcie_ep_unaligned_memaccess(dev); | ||
721 | if (ret) | ||
722 | goto err_gpio; | ||
723 | |||
724 | ret = dra7xx_add_pcie_ep(dra7xx, pdev); | ||
725 | if (ret < 0) | ||
726 | goto err_gpio; | ||
727 | break; | ||
728 | default: | ||
729 | dev_err(dev, "INVALID device type %d\n", mode); | ||
730 | } | ||
731 | dra7xx->mode = mode; | ||
732 | |||
733 | ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, | ||
734 | IRQF_SHARED, "dra7xx-pcie-main", dra7xx); | ||
735 | if (ret) { | ||
736 | dev_err(dev, "failed to request irq\n"); | ||
737 | goto err_gpio; | ||
738 | } | ||
739 | |||
740 | return 0; | ||
741 | |||
742 | err_gpio: | ||
743 | pm_runtime_put(dev); | ||
744 | |||
745 | err_get_sync: | ||
746 | pm_runtime_disable(dev); | ||
747 | dra7xx_pcie_disable_phy(dra7xx); | ||
748 | |||
749 | err_link: | ||
750 | while (--i >= 0) | ||
751 | device_link_del(link[i]); | ||
752 | |||
753 | return ret; | ||
754 | } | ||
755 | |||
756 | #ifdef CONFIG_PM_SLEEP | ||
757 | static int dra7xx_pcie_suspend(struct device *dev) | ||
758 | { | ||
759 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
760 | struct dw_pcie *pci = dra7xx->pci; | ||
761 | u32 val; | ||
762 | |||
763 | if (dra7xx->mode != DW_PCIE_RC_TYPE) | ||
764 | return 0; | ||
765 | |||
766 | /* clear MSE */ | ||
767 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | ||
768 | val &= ~PCI_COMMAND_MEMORY; | ||
769 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static int dra7xx_pcie_resume(struct device *dev) | ||
775 | { | ||
776 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
777 | struct dw_pcie *pci = dra7xx->pci; | ||
778 | u32 val; | ||
779 | |||
780 | if (dra7xx->mode != DW_PCIE_RC_TYPE) | ||
781 | return 0; | ||
782 | |||
783 | /* set MSE */ | ||
784 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | ||
785 | val |= PCI_COMMAND_MEMORY; | ||
786 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); | ||
787 | |||
788 | return 0; | ||
789 | } | ||
790 | |||
/*
 * Late (noirq) suspend: power down the PHYs once the controller has been
 * quiesced by dra7xx_pcie_suspend(). Cannot fail.
 */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}
799 | |||
/*
 * Early (noirq) resume: power the PHYs back up before the controller is
 * touched by dra7xx_pcie_resume(). Propagates the PHY enable error, if any.
 */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret = dra7xx_pcie_enable_phy(dra7xx);

	if (ret)
		dev_err(dev, "failed to enable phy\n");

	return ret;
}
813 | #endif | ||
814 | |||
/*
 * Shutdown hook: stop the LTSSM so the link goes down cleanly, release the
 * runtime-PM reference taken at probe, then power off the PHYs.
 * Order matters: the link must be down before the PHYs lose power.
 */
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}
830 | |||
/*
 * MSE is toggled in the normal sleep phase (IRQs on); the PHYs are powered
 * down/up in the noirq phase, once the controller is fully quiesced.
 */
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};
836 | |||
static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		/* No remove(): unbinding is not supported once probed. */
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c new file mode 100644 index 000000000000..4cc1e5df8c79 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-exynos.c | |||
@@ -0,0 +1,539 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Samsung EXYNOS SoCs | ||
4 | * | ||
5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
6 | * http://www.samsung.com | ||
7 | * | ||
8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/gpio.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/of_gpio.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/phy/phy.h> | ||
22 | #include <linux/resource.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | #include "pcie-designware.h" | ||
27 | |||
/* Retrieve the exynos_pcie handle stashed via platform_set_drvdata(). */
#define to_exynos_pcie(x)	dev_get_drvdata((x)->dev)

/* PCIe ELBI (External Local Bus Interface) registers */
#define PCIE_IRQ_PULSE			0x000
#define IRQ_INTA_ASSERT			BIT(0)
#define IRQ_INTB_ASSERT			BIT(2)
#define IRQ_INTC_ASSERT			BIT(4)
#define IRQ_INTD_ASSERT			BIT(6)
#define PCIE_IRQ_LEVEL			0x004
#define PCIE_IRQ_SPECIAL		0x008
#define PCIE_IRQ_EN_PULSE		0x00c
#define PCIE_IRQ_EN_LEVEL		0x010
#define IRQ_MSI_ENABLE			BIT(2)
#define PCIE_IRQ_EN_SPECIAL		0x014
#define PCIE_PWR_RESET			0x018
#define PCIE_CORE_RESET			0x01c
#define PCIE_CORE_RESET_ENABLE		BIT(0)
#define PCIE_STICKY_RESET		0x020
#define PCIE_NONSTICKY_RESET		0x024
#define PCIE_APP_INIT_RESET		0x028
#define PCIE_APP_LTSSM_ENABLE		0x02c
#define PCIE_ELBI_RDLH_LINKUP		0x064
#define PCIE_ELBI_LTSSM_ENABLE		0x1
#define PCIE_ELBI_SLV_AWMISC		0x11c
#define PCIE_ELBI_SLV_ARMISC		0x120
/* Routes AXI slave accesses to the DBI space instead of config space. */
#define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
54 | |||
/* Memory-mapped resources obtained from DT. */
struct exynos_pcie_mem_res {
	void __iomem *elbi_base;	/* DT 0th resource: PCIe CTRL */
};

/* Clocks required to run the controller. */
struct exynos_pcie_clk_res {
	struct clk *clk;	/* core ("pcie") clock */
	struct clk *bus_clk;	/* "pcie_bus" clock */
};

/* Per-instance driver state; one per PCIe controller. */
struct exynos_pcie {
	struct dw_pcie *pci;
	struct exynos_pcie_mem_res *mem_res;
	struct exynos_pcie_clk_res *clk_res;
	const struct exynos_pcie_ops *ops;	/* SoC-variant hooks */
	int reset_gpio;		/* PERST# GPIO number, may be negative */

	struct phy *phy;	/* optional; NULL when not described in DT */
};

/* SoC-variant resource hooks, selected via the OF match data. */
struct exynos_pcie_ops {
	int (*get_mem_resources)(struct platform_device *pdev,
				 struct exynos_pcie *ep);
	int (*get_clk_resources)(struct exynos_pcie *ep);
	int (*init_clk_resources)(struct exynos_pcie *ep);
	void (*deinit_clk_resources)(struct exynos_pcie *ep);
};
81 | |||
82 | static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, | ||
83 | struct exynos_pcie *ep) | ||
84 | { | ||
85 | struct dw_pcie *pci = ep->pci; | ||
86 | struct device *dev = pci->dev; | ||
87 | struct resource *res; | ||
88 | |||
89 | ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); | ||
90 | if (!ep->mem_res) | ||
91 | return -ENOMEM; | ||
92 | |||
93 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
94 | ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); | ||
95 | if (IS_ERR(ep->mem_res->elbi_base)) | ||
96 | return PTR_ERR(ep->mem_res->elbi_base); | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep) | ||
102 | { | ||
103 | struct dw_pcie *pci = ep->pci; | ||
104 | struct device *dev = pci->dev; | ||
105 | |||
106 | ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL); | ||
107 | if (!ep->clk_res) | ||
108 | return -ENOMEM; | ||
109 | |||
110 | ep->clk_res->clk = devm_clk_get(dev, "pcie"); | ||
111 | if (IS_ERR(ep->clk_res->clk)) { | ||
112 | dev_err(dev, "Failed to get pcie rc clock\n"); | ||
113 | return PTR_ERR(ep->clk_res->clk); | ||
114 | } | ||
115 | |||
116 | ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus"); | ||
117 | if (IS_ERR(ep->clk_res->bus_clk)) { | ||
118 | dev_err(dev, "Failed to get pcie bus clock\n"); | ||
119 | return PTR_ERR(ep->clk_res->bus_clk); | ||
120 | } | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep) | ||
126 | { | ||
127 | struct dw_pcie *pci = ep->pci; | ||
128 | struct device *dev = pci->dev; | ||
129 | int ret; | ||
130 | |||
131 | ret = clk_prepare_enable(ep->clk_res->clk); | ||
132 | if (ret) { | ||
133 | dev_err(dev, "cannot enable pcie rc clock"); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | ret = clk_prepare_enable(ep->clk_res->bus_clk); | ||
138 | if (ret) { | ||
139 | dev_err(dev, "cannot enable pcie bus clock"); | ||
140 | goto err_bus_clk; | ||
141 | } | ||
142 | |||
143 | return 0; | ||
144 | |||
145 | err_bus_clk: | ||
146 | clk_disable_unprepare(ep->clk_res->clk); | ||
147 | |||
148 | return ret; | ||
149 | } | ||
150 | |||
/* Reverse of init_clk_resources(): bus clock first, then the core clock. */
static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
	clk_disable_unprepare(ep->clk_res->bus_clk);
	clk_disable_unprepare(ep->clk_res->clk);
}
156 | |||
/* exynos5440 variant hooks, bound via the OF match table below. */
static const struct exynos_pcie_ops exynos5440_pcie_ops = {
	.get_mem_resources	= exynos5440_pcie_get_mem_resources,
	.get_clk_resources	= exynos5440_pcie_get_clk_resources,
	.init_clk_resources	= exynos5440_pcie_init_clk_resources,
	.deinit_clk_resources	= exynos5440_pcie_deinit_clk_resources,
};
163 | |||
/* Thin MMIO write helper for the ELBI window. */
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
	writel(val, base + reg);
}

/* Thin MMIO read helper for the ELBI window. */
static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}
173 | |||
174 | static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on) | ||
175 | { | ||
176 | u32 val; | ||
177 | |||
178 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC); | ||
179 | if (on) | ||
180 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
181 | else | ||
182 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
183 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC); | ||
184 | } | ||
185 | |||
186 | static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on) | ||
187 | { | ||
188 | u32 val; | ||
189 | |||
190 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC); | ||
191 | if (on) | ||
192 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
193 | else | ||
194 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
195 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC); | ||
196 | } | ||
197 | |||
/*
 * Put the PCIe core into reset: clear the core-reset enable bit and drop
 * power/sticky/non-sticky resets. The exact write order is what the
 * hardware expects; do not reorder.
 */
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
	val &= ~PCIE_CORE_RESET_ENABLE;
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
}

/*
 * Release the PCIe core from reset and pulse APP_INIT_RESET (write 1 then
 * 0) to restart the application logic.
 */
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
	val |= PCIE_CORE_RESET_ENABLE;

	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
}
223 | |||
224 | static void exynos_pcie_assert_reset(struct exynos_pcie *ep) | ||
225 | { | ||
226 | struct dw_pcie *pci = ep->pci; | ||
227 | struct device *dev = pci->dev; | ||
228 | |||
229 | if (ep->reset_gpio >= 0) | ||
230 | devm_gpio_request_one(dev, ep->reset_gpio, | ||
231 | GPIOF_OUT_INIT_HIGH, "RESET"); | ||
232 | } | ||
233 | |||
/*
 * Bring the PCIe link up: reset the core, cycle the PHY, configure the RC
 * and enable the LTSSM, then wait for link-up. Returns 0 on success (or if
 * the link was already up), -ETIMEDOUT if training never completes.
 * The reset/PHY/LTSSM sequence is order-sensitive; do not reorder.
 */
static int exynos_pcie_establish_link(struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "Link already up\n");
		return 0;
	}

	exynos_pcie_assert_core_reset(ep);

	phy_reset(ep->phy);

	/* Release the power reset before powering the PHY. */
	exynos_pcie_writel(ep->mem_res->elbi_base, 1,
			PCIE_PWR_RESET);

	phy_power_on(ep->phy);
	phy_init(ep->phy);

	exynos_pcie_deassert_core_reset(ep);
	dw_pcie_setup_rc(pp);
	exynos_pcie_assert_reset(ep);

	/* assert LTSSM enable */
	exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
			  PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	/* Training failed: power the PHY back down before bailing. */
	phy_power_off(ep->phy);
	return -ETIMEDOUT;
}
270 | |||
/* Ack all pending pulse IRQs by writing the latched bits back (W1C). */
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
}

/* Unmask the four legacy INTx assert interrupts in the pulse IRQ block. */
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
	u32 val;

	/* enable INTX interrupt */
	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
		IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
}

/* Main IRQ handler: only needs to ack the pulse interrupts. */
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
	struct exynos_pcie *ep = arg;

	exynos_pcie_clear_irq_pulse(ep);
	return IRQ_HANDLED;
}
296 | |||
/* Set up DWC MSI handling and unmask the MSI bit in the level IRQ block. */
static void exynos_pcie_msi_init(struct exynos_pcie *ep)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val;

	dw_pcie_msi_init(pp);

	/* enable MSI interrupt */
	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
	val |= IRQ_MSI_ENABLE;
	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
}

/* Enable legacy INTx and, when configured, MSI interrupts. */
static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
{
	exynos_pcie_enable_irq_pulse(ep);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		exynos_pcie_msi_init(ep);
}
318 | |||
/*
 * DBI read accessor: temporarily routes AXI reads to the DBI space via the
 * ARMISC sideband register, performs the read, then restores routing.
 */
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
				u32 reg, size_t size)
{
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	u32 val;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	dw_pcie_read(base + reg, size, &val);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return val;
}

/*
 * DBI write accessor: mirror of exynos_pcie_read_dbi() using the AWMISC
 * (write-channel) sideband register.
 */
static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
				  u32 reg, size_t size, u32 val)
{
	struct exynos_pcie *ep = to_exynos_pcie(pci);

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	dw_pcie_write(base + reg, size, val);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
}
340 | |||
/*
 * Read from the RC's own config space. Like the DBI accessors, needs the
 * read sideband routed to DBI for the duration of the access.
 */
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				   u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	int ret;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	ret = dw_pcie_read(pci->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return ret;
}

/* Write to the RC's own config space; write-channel mirror of the above. */
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
				   u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);
	int ret;

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	ret = dw_pcie_write(pci->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
	return ret;
}
366 | |||
367 | static int exynos_pcie_link_up(struct dw_pcie *pci) | ||
368 | { | ||
369 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
370 | u32 val; | ||
371 | |||
372 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP); | ||
373 | if (val == PCIE_ELBI_LTSSM_ENABLE) | ||
374 | return 1; | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
/*
 * DWC host_init callback: bring the link up and enable interrupts.
 * NOTE(review): a link-training failure from establish_link() is not
 * propagated here — host init reports success regardless.
 */
static int exynos_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct exynos_pcie *ep = to_exynos_pcie(pci);

	exynos_pcie_establish_link(ep);
	exynos_pcie_enable_interrupts(ep);

	return 0;
}

/* Host-side callbacks handed to the DWC core. */
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
	.rd_own_conf = exynos_pcie_rd_own_conf,
	.wr_own_conf = exynos_pcie_wr_own_conf,
	.host_init = exynos_pcie_host_init,
};
395 | |||
/*
 * Wire up the host side: request the main IRQ (DT index 1), fetch the MSI
 * IRQ (DT index 0) when MSI is enabled, and hand the port to the DWC core.
 * Returns 0 on success or a negative errno.
 */
static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
				       struct platform_device *pdev)
{
	struct dw_pcie *pci = ep->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return pp->irq;
	}
	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
				IRQF_SHARED, "exynos-pcie", ep);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq(pdev, 0);
		if (pp->msi_irq < 0) {
			dev_err(dev, "failed to get msi irq\n");
			return pp->msi_irq;
		}
	}

	/* -1 lets the DWC core pick the root bus number. */
	pp->root_bus_nr = -1;
	pp->ops = &exynos_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
435 | |||
/* Low-level DWC callbacks: sideband-aware DBI access and link-up check. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.read_dbi = exynos_pcie_read_dbi,
	.write_dbi = exynos_pcie_write_dbi,
	.link_up = exynos_pcie_link_up,
};
441 | |||
/*
 * Probe one Exynos PCIe controller: allocate state, pick the SoC-variant
 * ops from the OF match data, grab the optional PHY and reset GPIO, map
 * registers, enable clocks and register the host port.
 */
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct exynos_pcie *ep;
	struct device_node *np = dev->of_node;
	int ret;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	ep->pci = pci;
	ep->ops = (const struct exynos_pcie_ops *)
		of_device_get_match_data(dev);

	/* May be negative; exynos_pcie_assert_reset() tolerates that. */
	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);

	/* PHY is optional: only -EPROBE_DEFER is fatal here. */
	ep->phy = devm_of_phy_get(dev, np, NULL);
	if (IS_ERR(ep->phy)) {
		if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
			return PTR_ERR(ep->phy);

		ep->phy = NULL;
	}

	if (ep->ops && ep->ops->get_mem_resources) {
		ret = ep->ops->get_mem_resources(pdev, ep);
		if (ret)
			return ret;
	}

	if (ep->ops && ep->ops->get_clk_resources &&
			ep->ops->init_clk_resources) {
		ret = ep->ops->get_clk_resources(ep);
		if (ret)
			return ret;
		ret = ep->ops->init_clk_resources(ep);
		if (ret)
			return ret;
	}

	platform_set_drvdata(pdev, ep);

	ret = exynos_add_pcie_port(ep, pdev);
	if (ret < 0)
		goto fail_probe;

	return 0;

fail_probe:
	/* phy_exit(NULL) is a no-op, so the optional PHY is safe here. */
	phy_exit(ep->phy);

	if (ep->ops && ep->ops->deinit_clk_resources)
		ep->ops->deinit_clk_resources(ep);
	return ret;
}
506 | |||
/*
 * Remove hook (only reachable via __exit_p; the driver does not support
 * module unload): shut the clocks down. Everything else is devm-managed.
 */
static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
	struct exynos_pcie *ep = platform_get_drvdata(pdev);

	if (ep->ops && ep->ops->deinit_clk_resources)
		ep->ops->deinit_clk_resources(ep);

	return 0;
}
516 | |||
/* OF match: the .data field carries the variant's exynos_pcie_ops. */
static const struct of_device_id exynos_pcie_of_match[] = {
	{
		.compatible = "samsung,exynos5440-pcie",
		.data = &exynos5440_pcie_ops
	},
	{},
};

static struct platform_driver exynos_pcie_driver = {
	.remove		= __exit_p(exynos_pcie_remove),
	.driver = {
		.name	= "exynos-pcie",
		.of_match_table = exynos_pcie_of_match,
	},
};

/* Exynos PCIe driver does not allow module unload */

/* Registered at subsys_initcall so the RC exists before device drivers. */
static int __init exynos_pcie_init(void)
{
	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
}
subsys_initcall(exynos_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c new file mode 100644 index 000000000000..80f604602783 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-imx6.c | |||
@@ -0,0 +1,871 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Freescale i.MX6 SoCs | ||
4 | * | ||
5 | * Copyright (C) 2013 Kosagi | ||
6 | * http://www.kosagi.com | ||
7 | * | ||
8 | * Author: Sean Cross <xobs@kosagi.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/gpio.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mfd/syscon.h> | ||
16 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | ||
17 | #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/of_gpio.h> | ||
20 | #include <linux/of_device.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/regmap.h> | ||
24 | #include <linux/regulator/consumer.h> | ||
25 | #include <linux/resource.h> | ||
26 | #include <linux/signal.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/reset.h> | ||
30 | |||
31 | #include "pcie-designware.h" | ||
32 | |||
/* Retrieve the imx6_pcie handle stashed via platform_set_drvdata(). */
#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

/* SoC variants supported by this driver; selected via the compatible. */
enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

/* Per-instance driver state; one per PCIe controller. */
struct imx6_pcie {
	struct dw_pcie		*pci;
	int			reset_gpio;	/* PERST# GPIO number */
	bool			gpio_active_high;
	struct clk		*pcie_bus;
	struct clk		*pcie_phy;
	struct clk		*pcie_inbound_axi;
	struct clk		*pcie;
	struct regmap		*iomuxc_gpr;	/* IOMUXC GPR syscon */
	struct reset_control	*pciephy_reset;	/* i.MX7D only */
	struct reset_control	*apps_reset;	/* i.MX7D only */
	enum imx6_pcie_variants variant;
	/* PHY TX tuning values, taken from DT properties. */
	u32			tx_deemph_gen1;
	u32			tx_deemph_gen2_3p5db;
	u32			tx_deemph_gen2_6db;
	u32			tx_swing_full;
	u32			tx_swing_low;
	int			link_gen;	/* max link speed (gen) */
	struct regulator	*vpcie;		/* optional supply */
};
62 | |||
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c	/* Link Capabilities */
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80	/* Link Control/Status */

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)

/* Control register for the indirect PHY register interface below. */
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

/* Status register: ack bit for the PHY handshake, read data in low half. */
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
107 | static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) | ||
108 | { | ||
109 | struct dw_pcie *pci = imx6_pcie->pci; | ||
110 | u32 val; | ||
111 | u32 max_iterations = 10; | ||
112 | u32 wait_counter = 0; | ||
113 | |||
114 | do { | ||
115 | val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); | ||
116 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; | ||
117 | wait_counter++; | ||
118 | |||
119 | if (val == exp_val) | ||
120 | return 0; | ||
121 | |||
122 | udelay(1); | ||
123 | } while (wait_counter < max_iterations); | ||
124 | |||
125 | return -ETIMEDOUT; | ||
126 | } | ||
127 | |||
/*
 * Latch a PHY register address into the indirect interface: present the
 * address, pulse CAP_ADR, and wait for the ack to assert then de-assert.
 * Returns 0 on success or -ETIMEDOUT from the polls.
 */
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	/* Raise CAP_ADR while the address is still on the data lines. */
	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* Drop CAP_ADR again and wait for the ack to clear. */
	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}
149 | |||
/*
 * Read from the 16-bit PCIe PHY control registers (not memory-mapped).
 * Latches the address, pulses the RD strobe, and returns the 16-bit value
 * from the low half of PCIE_PHY_STAT in *data. 0 on success, -ETIMEDOUT
 * if any handshake step times out.
 */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}
177 | |||
/*
 * Write a 16-bit value to a PHY control register through the indirect
 * interface. Each strobe (CAP_DAT, WR) is asserted and de-asserted with an
 * ack handshake in between; the sequence is fixed by the hardware protocol.
 * Returns 0 on success, -ETIMEDOUT if any handshake step times out.
 */
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* Clear the control register once the transaction is complete. */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}
232 | |||
233 | static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) | ||
234 | { | ||
235 | u32 tmp; | ||
236 | |||
237 | pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); | ||
238 | tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | | ||
239 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
240 | pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); | ||
241 | |||
242 | usleep_range(2000, 3000); | ||
243 | |||
244 | pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); | ||
245 | tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | | ||
246 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
247 | pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); | ||
248 | } | ||
249 | |||
/* Added for PCI abort handling */
/*
 * ARM external-abort hook installed once at boot by imx6_pcie_init().
 *
 * Config accesses to absent devices raise an external abort on i.MX6.
 * Decode the faulting instruction: for load instructions, fake the
 * "all ones" value a PCI master abort would return (0xff for byte
 * loads, ~0 otherwise), advance the PC past the instruction, and
 * report the abort as handled (return 0).  Return 1 to let the kernel
 * take its normal fault path for anything else.
 *
 * NOTE(review): the bit tests assume 32-bit ARM (A32) encodings — Rt
 * in bits 15:12, single load/store in the first test, the "extra"
 * load/store encodings in the second — confirm this handler can never
 * be entered from Thumb code.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;	/* destination register Rt */

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;	/* byte load: fake 0xff */
		else
			val = -1;	/* word load: fake 0xffffffff */

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;	/* skip the faulting instruction */
		return 0;
	}

	/* presumably the halfword/dual load encodings — fake all-ones too */
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}
283 | |||
/*
 * Put the PCIe core into reset, using the variant-specific mechanism
 * (reset controller on i.MX7D, IOMUXC GPR bits on the i.MX6 family),
 * then drop the optional vpcie supply if it is currently on.
 */
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		/* i.MX7D exposes dedicated reset lines for PHY and apps */
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
		/* power down the PHY test block and gate the ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	/* vpcie is optional; only disable it if it is actually enabled */
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
323 | |||
/*
 * Enable the variant-specific PCIe reference clock path.
 *
 * Returns 0 on success or a negative errno from clk_prepare_enable().
 * On i.MX7D the ref clock needs no extra handling here.
 */
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		/* release the test powerdown asserted at core reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * the async reset input need ref clock to sync internally,
		 * when the ref clock comes after reset, internal synced
		 * reset time is too short, cannot meet the requirement.
		 * add one ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	}

	return ret;
}
362 | |||
363 | static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) | ||
364 | { | ||
365 | u32 val; | ||
366 | unsigned int retries; | ||
367 | struct device *dev = imx6_pcie->pci->dev; | ||
368 | |||
369 | for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { | ||
370 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); | ||
371 | |||
372 | if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) | ||
373 | return; | ||
374 | |||
375 | usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, | ||
376 | PHY_PLL_LOCK_WAIT_USLEEP_MAX); | ||
377 | } | ||
378 | |||
379 | dev_err(dev, "PCIe PLL lock timeout\n"); | ||
380 | } | ||
381 | |||
/*
 * Bring the PCIe core out of reset: power the optional vpcie supply,
 * enable the phy/bus/core/ref clocks in order, optionally toggle the
 * board's reset GPIO, then release the variant-specific reset.
 *
 * Failures unwind already-acquired clocks and the regulator via the
 * goto chain below; errors are logged but not returned (void).
 */
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* vpcie is optional; only power it if it is not already on */
	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* pulse the downstream device's PERST# for 100ms */
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}
468 | |||
/*
 * Program the PHY-related IOMUXC GPR bits: ref-clock selection on
 * i.MX7D; RX equalization plus TX de-emphasis/swing settings (taken
 * from DT, see probe) on the i.MX6 family.  Finally mark the port as
 * a root complex in GPR12.
 */
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
		/* keep LTSSM disabled until imx6_pcie_establish_link() */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}
510 | |||
511 | static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) | ||
512 | { | ||
513 | struct dw_pcie *pci = imx6_pcie->pci; | ||
514 | struct device *dev = pci->dev; | ||
515 | |||
516 | /* check if the link is up or not */ | ||
517 | if (!dw_pcie_wait_for_link(pci)) | ||
518 | return 0; | ||
519 | |||
520 | dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | ||
521 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), | ||
522 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); | ||
523 | return -ETIMEDOUT; | ||
524 | } | ||
525 | |||
526 | static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) | ||
527 | { | ||
528 | struct dw_pcie *pci = imx6_pcie->pci; | ||
529 | struct device *dev = pci->dev; | ||
530 | u32 tmp; | ||
531 | unsigned int retries; | ||
532 | |||
533 | for (retries = 0; retries < 200; retries++) { | ||
534 | tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
535 | /* Test if the speed change finished. */ | ||
536 | if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) | ||
537 | return 0; | ||
538 | usleep_range(100, 1000); | ||
539 | } | ||
540 | |||
541 | dev_err(dev, "Speed change timeout\n"); | ||
542 | return -EINVAL; | ||
543 | } | ||
544 | |||
/*
 * Train the link: start at Gen1, enable the LTSSM, wait for link-up,
 * then (if DT allows Gen2) request a directed speed change and wait for
 * it and the retraining to complete.  On any failure the PHY is reset
 * and the error returned.
 */
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	if (imx6_pcie->variant == IMX7D)
		reset_control_deassert(imx6_pcie->apps_reset);
	else
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from i.MX6 family when no link speed transition
			 * occurs and we go Gen1 -> yep, Gen1. The difference
			 * is that, in such case, it will not be cleared by HW
			 * which will cause the following code to report false
			 * failure.
			 */

			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	/* report the negotiated speed from the Link Control/Status reg */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}
626 | |||
/*
 * DWC host_init callback: cycle the core through reset, program the
 * PHY GPR bits, release reset, set up the root complex and train the
 * link; finally initialize MSI if configured.
 */
static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	/*
	 * NOTE(review): the return value of imx6_pcie_establish_link() is
	 * ignored, so init reports success even with no link partner —
	 * confirm this "bring up without a device" behavior is intended.
	 */
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}
643 | |||
644 | static int imx6_pcie_link_up(struct dw_pcie *pci) | ||
645 | { | ||
646 | return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) & | ||
647 | PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; | ||
648 | } | ||
649 | |||
/* Host callbacks handed to the DesignWare core. */
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};
653 | |||
654 | static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, | ||
655 | struct platform_device *pdev) | ||
656 | { | ||
657 | struct dw_pcie *pci = imx6_pcie->pci; | ||
658 | struct pcie_port *pp = &pci->pp; | ||
659 | struct device *dev = &pdev->dev; | ||
660 | int ret; | ||
661 | |||
662 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
663 | pp->msi_irq = platform_get_irq_byname(pdev, "msi"); | ||
664 | if (pp->msi_irq <= 0) { | ||
665 | dev_err(dev, "failed to get MSI irq\n"); | ||
666 | return -ENODEV; | ||
667 | } | ||
668 | } | ||
669 | |||
670 | pp->root_bus_nr = -1; | ||
671 | pp->ops = &imx6_pcie_host_ops; | ||
672 | |||
673 | ret = dw_pcie_host_init(pp); | ||
674 | if (ret) { | ||
675 | dev_err(dev, "failed to initialize host\n"); | ||
676 | return ret; | ||
677 | } | ||
678 | |||
679 | return 0; | ||
680 | } | ||
681 | |||
/* Core callbacks: only a custom link-up check is needed on i.MX6. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = imx6_pcie_link_up,
};
685 | |||
/*
 * Probe: allocate the driver and DWC core state, map the DBI register
 * space, gather all DT-described resources (reset GPIO, clocks, resets,
 * GPR syscon, PHY TX tuning values, link-speed limit, optional vpcie
 * regulator), then register the root port.
 *
 * Returns 0 on success or a negative errno (including -EPROBE_DEFER
 * when the reset GPIO or vpcie regulator is not yet available).
 */
static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	/* the OF match table stores the variant enum in .data */
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		/* claim it parked in the "reset asserted" state */
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

	/* variant-specific resources: extra clock (6SX) or resets (7D) */
	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings (defaults used if DT omits them) */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	/* vpcie is optional; absence is fine, only defer on -EPROBE_DEFER */
	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}
830 | |||
/* Shutdown hook: quiesce the core before handing off to firmware. */
static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so bootloader gets clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}
838 | |||
/* DT match table; .data carries the enum imx6_pcie_variants value. */
static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, },
	{},
};
846 | |||
/* Platform driver; suppress_bind_attrs: manual unbind is not supported. */
static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};
856 | |||
/*
 * Module entry point: install the ARM external-abort hook (fault code
 * 8) and register the platform driver.
 */
static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred we need to make sure that
	 * hook_fault_code is not called after __init memory is freed
	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
	 * we can install the handler here without risking it
	 * accessing some uninitialized driver state.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c new file mode 100644 index 000000000000..0682213328e9 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone-dw.c | |||
@@ -0,0 +1,484 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * DesignWare application register space functions for Keystone PCI controller | ||
4 | * | ||
5 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | ||
6 | * http://www.ti.com | ||
7 | * | ||
8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/irq.h> | ||
12 | #include <linux/irqdomain.h> | ||
13 | #include <linux/irqreturn.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/of_pci.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include "pcie-designware.h" | ||
21 | #include "pci-keystone.h" | ||
22 | |||
/* Application register defines */
#define LTSSM_EN_VAL		1
#define LTSSM_STATE_MASK	0x1f
#define LTSSM_STATE_L0		0x11
#define DBI_CS2_EN_VAL		0x20
#define OB_XLAT_EN_VAL		2

/* Application registers */
#define CMD_STATUS			0x004
#define CFG_SETUP			0x008
#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define CFG_PCIM_WIN_CNT		32
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * n))
#define OB_OFFSET_HI(n)			(0x204 + (8 * n))

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

/*
 * MSI registers.  (A redundant, identical re-definition of IRQ_STATUS
 * previously sat in this group; it has been dropped — the definition
 * above is the single source of truth.)
 */
#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4

/* Error IRQ bits */
#define ERR_AER		BIT(5)	/* ECRC error */
#define ERR_AXI		BIT(4)	/* AXI tag lookup fatal error */
#define ERR_CORR	BIT(3)	/* Correctable error */
#define ERR_NONFATAL	BIT(2)	/* Non-fatal error */
#define ERR_FATAL	BIT(1)	/* Fatal error */
#define ERR_SYS		BIT(0)	/* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL	(ERR_AER | ERR_AXI | ERR_CORR | \
			 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ	(ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW	0x1c0
#define ERR_IRQ_STATUS		0x1c4
#define ERR_IRQ_ENABLE_SET	0x1c8
#define ERR_IRQ_ENABLE_CLR	0x1cc

/* Config space registers */
#define DEBUG0			0x728

#define to_keystone_pcie(x)	dev_get_drvdata((x)->dev)
72 | |||
73 | static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, | ||
74 | u32 *bit_pos) | ||
75 | { | ||
76 | *reg_offset = offset % 8; | ||
77 | *bit_pos = offset >> 3; | ||
78 | } | ||
79 | |||
/*
 * Return the physical address endpoints write to in order to raise an
 * MSI: the MSI_IRQ register within the application register space.
 */
phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	return ks_pcie->app.start + MSI_IRQ;
}
87 | |||
/* Read a Keystone PCIe application register at @offset. */
static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}
92 | |||
/* Write @val to the Keystone PCIe application register at @offset. */
static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}
97 | |||
/*
 * Demultiplex one MSI host interrupt.  @offset selects which of the
 * MSI status registers fired (banks are 0x10 apart); each of its low
 * 4 bits maps to MSI vector (offset + 8 * bit).
 */
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	u32 pending, vector;
	int src, virq;

	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
	 * shows 1, 9, 17, 25 and so forth
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}
122 | |||
/*
 * Ack one MSI vector: clear its status bit in the per-bank status
 * register, then EOI the bank's host-side MSI interrupt line.
 */
void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct dw_pcie *pci;

	pci = to_dw_pcie_from_pp(pp);
	ks_pcie = to_keystone_pcie(pci);
	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);

	/* status banks are 0x10 apart */
	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
			 BIT(bit_pos));
	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
137 | |||
138 | void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) | ||
139 | { | ||
140 | u32 reg_offset, bit_pos; | ||
141 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
142 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
143 | |||
144 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
145 | ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), | ||
146 | BIT(bit_pos)); | ||
147 | } | ||
148 | |||
149 | void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) | ||
150 | { | ||
151 | u32 reg_offset, bit_pos; | ||
152 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
153 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
154 | |||
155 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
156 | ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), | ||
157 | BIT(bit_pos)); | ||
158 | } | ||
159 | |||
/* DWC msi_host_init override: just allocate the core MSI IRQ domains. */
int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
{
	return dw_pcie_allocate_domains(pp);
}
164 | |||
165 | void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) | ||
166 | { | ||
167 | int i; | ||
168 | |||
169 | for (i = 0; i < PCI_NUM_INTX; i++) | ||
170 | ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); | ||
171 | } | ||
172 | |||
/*
 * Demultiplex one legacy INTx host interrupt.  @offset is the INTx
 * line number; the per-line status registers are 0x10 apart.
 */
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 pending;
	int virq;

	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}
191 | |||
/* Unmask all PCIe error interrupt sources (AER/AXI/fatal/...). */
void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
196 | |||
/*
 * Error-IRQ handler: read the raw status, log fatal error classes,
 * then ack whatever was pending.  Returns IRQ_NONE when no recognized
 * error bit is set.
 */
irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
	u32 status;

	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
	if (!status)
		return IRQ_NONE;

	if (status & ERR_FATAL_IRQ)
		dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
			status);

	/* Ack the IRQ; status bits are RW1C */
	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
	return IRQ_HANDLED;
}
213 | |||
/*
 * The INTx EOI is issued by ks_dw_pcie_handle_legacy_irq() at the
 * application-register level, so these per-IRQ chip callbacks have
 * nothing to do; they exist only to satisfy the irq_chip interface
 * required by handle_level_irq.
 */
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

/* irq_chip backing the legacy INTx domain created in ks_dw_pcie_host_init() */
static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};
232 | |||
/* .map callback: attach the legacy chip with level-type flow handling. */
static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);

	return 0;
}
242 | |||
/* Linear INTx domain ops (PCI_NUM_INTX hwirqs, added in host_init). */
static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
247 | |||
/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 *
 * NOTE(review): the poll below has no timeout; it relies on the hardware
 * always completing the dbi_cs2 transition - confirm against TRM.
 */
static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);

	/* Busy-wait until the enable bit reads back as set */
	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}
266 | |||
/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 *
 * NOTE(review): unbounded poll, mirroring ks_dw_pcie_set_dbi_mode().
 */
static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);

	/* Busy-wait until the enable bit reads back as clear */
	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}
284 | |||
/*
 * ks_dw_pcie_setup_rc_app_regs() - program application registers for RC mode
 *
 * Disables the inbound BARs, then carves the host memory window into up to
 * CFG_PCIM_WIN_CNT chunks of tr_size bytes each with a direct 1:1 outbound
 * translation, and finally enables outbound translation.
 *
 * NOTE(review): pp->mem->start/end are narrowed to u32 here; fine for the
 * 32-bit Keystone address map, but would truncate >4G resources - confirm.
 */
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 start = pp->mem->start, end = pp->mem->end;
	int i, tr_size;
	u32 val;

	/* Disable BARs for inbound access (writes hit the CS2 mask regs) */
	ks_dw_pcie_set_dbi_mode(ks_pcie);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	/* Set outbound translation size per window division */
	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);

	/* Window size in bytes: 2^idx megabytes */
	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
		start += tr_size;
	}

	/* Enable OB translation */
	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}
315 | |||
/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device is residing on
 * @devfn: device, function number info
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0. Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions to access configuration - local and remote.
 * We access local region for bus 0 (as RC is attached on bus 0) and remote
 * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
 * we will do TYPE 0 access as it will be on our secondary bus (logical).
 * CFG_SETUP is needed only for remote configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 regval;

	/* Bus 0 is the RC itself: local (dbi) access, no CFG_SETUP needed */
	if (bus == 0)
		return pci->dbi_base;

	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Bus 1 sits directly behind the RC (our logical secondary bus) and
	 * gets a TYPE 0 access; any deeper bus needs TYPE 1, selected by
	 * bit 24 of CFG_SETUP.
	 */
	if (bus != 1)
		regval |= BIT(24);

	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
	return pp->va_cfg0_base;
}
357 | |||
358 | int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
359 | unsigned int devfn, int where, int size, u32 *val) | ||
360 | { | ||
361 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
362 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
363 | u8 bus_num = bus->number; | ||
364 | void __iomem *addr; | ||
365 | |||
366 | addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); | ||
367 | |||
368 | return dw_pcie_read(addr + where, size, val); | ||
369 | } | ||
370 | |||
371 | int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
372 | unsigned int devfn, int where, int size, u32 val) | ||
373 | { | ||
374 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
375 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
376 | u8 bus_num = bus->number; | ||
377 | void __iomem *addr; | ||
378 | |||
379 | addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); | ||
380 | |||
381 | return dw_pcie_write(addr + where, size, val); | ||
382 | } | ||
383 | |||
/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

	/*
	 * Configure and set up BAR0.  While dbi_cs2 is set, BAR writes land
	 * in the overlaid BAR *mask* registers (see ks_dw_pcie_set_dbi_mode),
	 * not the BAR itself.
	 */
	ks_dw_pcie_set_dbi_mode(ks_pcie);

	/* Enable BAR0 (bit 0) and size it to a 4K aperture (SZ_4K - 1) */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

	ks_dw_pcie_clear_dbi_mode(ks_pcie);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
409 | |||
/**
 * ks_dw_pcie_link_up() - Check if link up
 *
 * Reads the LTSSM state from the DEBUG0 DBI register and reports link up
 * only when the state machine has reached L0.
 */
int ks_dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, DEBUG0);
	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}
420 | |||
421 | void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) | ||
422 | { | ||
423 | u32 val; | ||
424 | |||
425 | /* Disable Link training */ | ||
426 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
427 | val &= ~LTSSM_EN_VAL; | ||
428 | ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
429 | |||
430 | /* Initiate Link Training */ | ||
431 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
432 | ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
433 | } | ||
434 | |||
/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 * @ks_pcie: keystone PCIe driver private data
 * @msi_intc_np: MSI interrupt controller DT node
 *	(NOTE(review): not referenced in this function body - confirm whether
 *	the parameter is still needed)
 *
 * Ioremap the register resources, initialize legacy irq domain
 * and call dw_pcie_host_init() to initialize the Keystone
 * PCI host controller.
 *
 * Return: 0 on success, negative errno on mapping or domain failure.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct dw_pcie *pci = ks_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/*
	 * We set these same and is used in pcie rd/wr_other_conf
	 * functions
	 */
	pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* Keep the physical app resource too; scan_bus programs BAR0 with it */
	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					      PCI_NUM_INTX,
					      &ks_dw_pcie_legacy_irq_domain_ops,
					      NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c new file mode 100644 index 000000000000..3722a5f31e5e --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.c | |||
@@ -0,0 +1,457 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Texas Instruments Keystone SoCs | ||
4 | * | ||
5 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | ||
6 | * http://www.ti.com | ||
7 | * | ||
8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | ||
9 | * Implementation based on pci-exynos.c and pcie-designware.c | ||
10 | */ | ||
11 | |||
12 | #include <linux/irqchip/chained_irq.h> | ||
13 | #include <linux/clk.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/irqdomain.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/msi.h> | ||
19 | #include <linux/of_irq.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/of_pci.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/phy/phy.h> | ||
24 | #include <linux/resource.h> | ||
25 | #include <linux/signal.h> | ||
26 | |||
27 | #include "pcie-designware.h" | ||
28 | #include "pci-keystone.h" | ||
29 | |||
30 | #define DRIVER_NAME "keystone-pcie" | ||
31 | |||
32 | /* DEV_STAT_CTRL */ | ||
33 | #define PCIE_CAP_BASE 0x70 | ||
34 | |||
35 | /* PCIE controller device IDs */ | ||
36 | #define PCIE_RC_K2HK 0xb008 | ||
37 | #define PCIE_RC_K2E 0xb009 | ||
38 | #define PCIE_RC_K2L 0xb00a | ||
39 | |||
40 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) | ||
41 | |||
/*
 * quirk_limit_mrrs() - clamp MRRS to 256 bytes behind a Keystone RC
 *
 * Runs as a PCI fixup at device-enable time: walks up to the host bridge
 * and, if the bridge is one of the Keystone RC device IDs, limits the
 * device's Max Read Request Size to 256 bytes.
 */
static void quirk_limit_mrrs(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge = bus->self;
	/* Keystone RC device IDs this quirk applies to */
	static const struct pci_device_id rc_pci_devids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
		{ 0, },
	};

	/* The RC itself does not need the clamp */
	if (pci_is_root_bus(bus))
		return;

	/* look for the host bridge */
	while (!pci_is_root_bus(bus)) {
		bridge = bus->self;
		bus = bus->parent;
	}

	if (bridge) {
		/*
		 * Keystone PCI controller has a h/w limitation of
		 * 256 bytes maximum read request size. It can't handle
		 * anything higher than this. So force this limit on
		 * all downstream devices.
		 */
		if (pci_match_id(rc_pci_devids, bridge)) {
			if (pcie_get_readrq(dev) > 256) {
				dev_info(&dev->dev, "limiting MRRS to 256\n");
				pcie_set_readrq(dev, 256);
			}
		}
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
81 | |||
82 | static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) | ||
83 | { | ||
84 | struct dw_pcie *pci = ks_pcie->pci; | ||
85 | struct pcie_port *pp = &pci->pp; | ||
86 | struct device *dev = pci->dev; | ||
87 | unsigned int retries; | ||
88 | |||
89 | dw_pcie_setup_rc(pp); | ||
90 | |||
91 | if (dw_pcie_link_up(pci)) { | ||
92 | dev_info(dev, "Link already up\n"); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | /* check if the link is up or not */ | ||
97 | for (retries = 0; retries < 5; retries++) { | ||
98 | ks_dw_pcie_initiate_link_train(ks_pcie); | ||
99 | if (!dw_pcie_wait_for_link(pci)) | ||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | dev_err(dev, "phy link never came up\n"); | ||
104 | return -ETIMEDOUT; | ||
105 | } | ||
106 | |||
/**
 * ks_pcie_msi_irq_handler() - chained handler for one MSI host IRQ line
 * @desc: irq descriptor of the raised host line
 *
 * Translates the host IRQ back to its bank offset (index into
 * msi_host_irqs[]) and lets the DW-layer helper dispatch the pending
 * MSI vectors of that bank.
 */
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irqs[0];
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, "%s, irq %d\n", __func__, irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
	chained_irq_exit(chip, desc);
}
127 | |||
/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Translates the host IRQ back to its INTx offset (index into
 * legacy_host_irqs[]) and dispatches it via the DW-layer helper. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}
156 | |||
157 | static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | ||
158 | char *controller, int *num_irqs) | ||
159 | { | ||
160 | int temp, max_host_irqs, legacy = 1, *host_irqs; | ||
161 | struct device *dev = ks_pcie->pci->dev; | ||
162 | struct device_node *np_pcie = dev->of_node, **np_temp; | ||
163 | |||
164 | if (!strcmp(controller, "msi-interrupt-controller")) | ||
165 | legacy = 0; | ||
166 | |||
167 | if (legacy) { | ||
168 | np_temp = &ks_pcie->legacy_intc_np; | ||
169 | max_host_irqs = PCI_NUM_INTX; | ||
170 | host_irqs = &ks_pcie->legacy_host_irqs[0]; | ||
171 | } else { | ||
172 | np_temp = &ks_pcie->msi_intc_np; | ||
173 | max_host_irqs = MAX_MSI_HOST_IRQS; | ||
174 | host_irqs = &ks_pcie->msi_host_irqs[0]; | ||
175 | } | ||
176 | |||
177 | /* interrupt controller is in a child node */ | ||
178 | *np_temp = of_get_child_by_name(np_pcie, controller); | ||
179 | if (!(*np_temp)) { | ||
180 | dev_err(dev, "Node for %s is absent\n", controller); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | |||
184 | temp = of_irq_count(*np_temp); | ||
185 | if (!temp) { | ||
186 | dev_err(dev, "No IRQ entries in %s\n", controller); | ||
187 | of_node_put(*np_temp); | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | |||
191 | if (temp > max_host_irqs) | ||
192 | dev_warn(dev, "Too many %s interrupts defined %u\n", | ||
193 | (legacy ? "legacy" : "MSI"), temp); | ||
194 | |||
195 | /* | ||
196 | * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to | ||
197 | * 7 (MSI) | ||
198 | */ | ||
199 | for (temp = 0; temp < max_host_irqs; temp++) { | ||
200 | host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); | ||
201 | if (!host_irqs[temp]) | ||
202 | break; | ||
203 | } | ||
204 | |||
205 | of_node_put(*np_temp); | ||
206 | |||
207 | if (temp) { | ||
208 | *num_irqs = temp; | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | return -EINVAL; | ||
213 | } | ||
214 | |||
/*
 * Install the chained legacy/MSI handlers on each parsed host IRQ line,
 * enable INTx at the app-register level, and unmask the optional RC
 * error interrupt when one was mapped at probe time.
 */
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
	int i;

	/* Legacy IRQ */
	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
		irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
						 ks_pcie_legacy_irq_handler,
						 ks_pcie);
	}
	ks_dw_pcie_enable_legacy_irqs(ks_pcie);

	/* MSI IRQ */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
			irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
							 ks_pcie_msi_irq_handler,
							 ks_pcie);
		}
	}

	if (ks_pcie->error_irq > 0)
		ks_dw_pcie_enable_error_irq(ks_pcie);
}
239 | |||
/*
 * When a PCI device does not exist during config cycles, keystone host gets a
 * bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of faults.
 */
static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
				struct pt_regs *regs)
{
	/* Decode the ARM instruction that faulted */
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	/*
	 * NOTE(review): the mask/value pair appears to match ARM load
	 * instruction encodings - confirm against the ARM ARM. On a match,
	 * fake the all-ones config-read result and skip the instruction.
	 */
	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}
259 | |||
/*
 * DW host_init hook: bring up the link, program the app registers, install
 * interrupt handlers, publish the SoC device ID and clamp the advertised
 * MRRS, then hook the bus-error fault handler.
 */
static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	ks_pcie_establish_link(ks_pcie);
	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	/* Advertise 32-bit I/O addressing in both I/O base and limit */
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pci->dbi_base + PCI_IO_BASE);

	/* update the Device ID (read from the SoC registers at probe time) */
	writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);

	/* update the DEV_STAT_CTRL to publish right mrrs */
	val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	/* set the mrrs to 256 bytes (encoding 001 in bits 14:12) */
	val |= BIT(12);
	writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);

	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");

	return 0;
}
291 | |||
/* DW core hooks: Keystone-specific config accessors, MSI plumbing and the
 * post-scan BAR0 setup. */
static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
	.rd_other_conf = ks_dw_pcie_rd_other_conf,
	.wr_other_conf = ks_dw_pcie_wr_other_conf,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_dw_pcie_msi_set_irq,
	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
	.get_msi_addr = ks_dw_pcie_get_msi_addr,
	.msi_host_init = ks_dw_pcie_msi_host_init,
	.msi_irq_ack = ks_dw_pcie_msi_irq_ack,
	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
303 | |||
/* Thin ISR wrapper for the (shared) RC error IRQ line. */
static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
{
	struct keystone_pcie *ks_pcie = priv;

	return ks_dw_pcie_handle_error_irq(ks_pcie);
}
310 | |||
311 | static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, | ||
312 | struct platform_device *pdev) | ||
313 | { | ||
314 | struct dw_pcie *pci = ks_pcie->pci; | ||
315 | struct pcie_port *pp = &pci->pp; | ||
316 | struct device *dev = &pdev->dev; | ||
317 | int ret; | ||
318 | |||
319 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | ||
320 | "legacy-interrupt-controller", | ||
321 | &ks_pcie->num_legacy_host_irqs); | ||
322 | if (ret) | ||
323 | return ret; | ||
324 | |||
325 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
326 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | ||
327 | "msi-interrupt-controller", | ||
328 | &ks_pcie->num_msi_host_irqs); | ||
329 | if (ret) | ||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * Index 0 is the platform interrupt for error interrupt | ||
335 | * from RC. This is optional. | ||
336 | */ | ||
337 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | ||
338 | if (ks_pcie->error_irq <= 0) | ||
339 | dev_info(dev, "no error IRQ defined\n"); | ||
340 | else { | ||
341 | ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, | ||
342 | IRQF_SHARED, "pcie-error-irq", ks_pcie); | ||
343 | if (ret < 0) { | ||
344 | dev_err(dev, "failed to request error IRQ %d\n", | ||
345 | ks_pcie->error_irq); | ||
346 | return ret; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | pp->root_bus_nr = -1; | ||
351 | pp->ops = &keystone_pcie_host_ops; | ||
352 | ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); | ||
353 | if (ret) { | ||
354 | dev_err(dev, "failed to initialize host\n"); | ||
355 | return ret; | ||
356 | } | ||
357 | |||
358 | return 0; | ||
359 | } | ||
360 | |||
/* DT match table: one compatible covers all Keystone SoC RC instances. */
static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};
368 | |||
/* Core DW callbacks: only a custom link-up check is needed. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = ks_dw_pcie_link_up,
};
372 | |||
/* Driver removal: everything else is devm-managed; just drop the clock. */
static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);

	clk_disable_unprepare(ks_pcie->clk);

	return 0;
}
381 | |||
382 | static int __init ks_pcie_probe(struct platform_device *pdev) | ||
383 | { | ||
384 | struct device *dev = &pdev->dev; | ||
385 | struct dw_pcie *pci; | ||
386 | struct keystone_pcie *ks_pcie; | ||
387 | struct resource *res; | ||
388 | void __iomem *reg_p; | ||
389 | struct phy *phy; | ||
390 | int ret; | ||
391 | |||
392 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); | ||
393 | if (!ks_pcie) | ||
394 | return -ENOMEM; | ||
395 | |||
396 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
397 | if (!pci) | ||
398 | return -ENOMEM; | ||
399 | |||
400 | pci->dev = dev; | ||
401 | pci->ops = &dw_pcie_ops; | ||
402 | |||
403 | ks_pcie->pci = pci; | ||
404 | |||
405 | /* initialize SerDes Phy if present */ | ||
406 | phy = devm_phy_get(dev, "pcie-phy"); | ||
407 | if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) | ||
408 | return PTR_ERR(phy); | ||
409 | |||
410 | if (!IS_ERR_OR_NULL(phy)) { | ||
411 | ret = phy_init(phy); | ||
412 | if (ret < 0) | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | /* index 2 is to read PCI DEVICE_ID */ | ||
417 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
418 | reg_p = devm_ioremap_resource(dev, res); | ||
419 | if (IS_ERR(reg_p)) | ||
420 | return PTR_ERR(reg_p); | ||
421 | ks_pcie->device_id = readl(reg_p) >> 16; | ||
422 | devm_iounmap(dev, reg_p); | ||
423 | devm_release_mem_region(dev, res->start, resource_size(res)); | ||
424 | |||
425 | ks_pcie->np = dev->of_node; | ||
426 | platform_set_drvdata(pdev, ks_pcie); | ||
427 | ks_pcie->clk = devm_clk_get(dev, "pcie"); | ||
428 | if (IS_ERR(ks_pcie->clk)) { | ||
429 | dev_err(dev, "Failed to get pcie rc clock\n"); | ||
430 | return PTR_ERR(ks_pcie->clk); | ||
431 | } | ||
432 | ret = clk_prepare_enable(ks_pcie->clk); | ||
433 | if (ret) | ||
434 | return ret; | ||
435 | |||
436 | platform_set_drvdata(pdev, ks_pcie); | ||
437 | |||
438 | ret = ks_add_pcie_port(ks_pcie, pdev); | ||
439 | if (ret < 0) | ||
440 | goto fail_clk; | ||
441 | |||
442 | return 0; | ||
443 | fail_clk: | ||
444 | clk_disable_unprepare(ks_pcie->clk); | ||
445 | |||
446 | return ret; | ||
447 | } | ||
448 | |||
/*
 * __refdata: probe is __init and remove is __exit; the annotation keeps
 * the section-mismatch checker from warning about these references.
 */
static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h new file mode 100644 index 000000000000..8a13da391543 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Keystone PCI Controller's common includes | ||
4 | * | ||
5 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | ||
6 | * http://www.ti.com | ||
7 | * | ||
8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | ||
9 | */ | ||
10 | |||
/* Maximum number of MSI host IRQ lines handled per controller */
#define MAX_MSI_HOST_IRQS		8

/* Per-instance driver state for one Keystone PCIe RC */
struct keystone_pcie {
	struct dw_pcie		*pci;
	struct clk		*clk;		/* functional "pcie" clock */
	/* PCI Device ID (read from SoC registers at probe time) */
	u32			device_id;
	int			num_legacy_host_irqs;
	int			legacy_host_irqs[PCI_NUM_INTX];
	struct device_node	*legacy_intc_np;	/* legacy intc DT child */

	int			num_msi_host_irqs;
	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
	struct device_node	*msi_intc_np;		/* MSI intc DT child */
	struct irq_domain	*legacy_irq_domain;	/* linear INTx domain */
	struct device_node	*np;			/* controller DT node */

	int			error_irq;	/* optional RC error IRQ (<=0: none) */

	/* Application register space */
	void __iomem		*va_app_base;	/* DT 1st resource */
	struct resource		app;		/* physical app region */
};
34 | |||
35 | /* Keystone DW specific MSI controller APIs/definitions */ | ||
36 | void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); | ||
37 | phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); | ||
38 | |||
39 | /* Keystone specific PCI controller APIs */ | ||
40 | void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); | ||
41 | void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); | ||
42 | void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); | ||
43 | irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); | ||
44 | int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, | ||
45 | struct device_node *msi_intc_np); | ||
46 | int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
47 | unsigned int devfn, int where, int size, u32 val); | ||
48 | int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
49 | unsigned int devfn, int where, int size, u32 *val); | ||
50 | void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); | ||
51 | void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); | ||
52 | void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp); | ||
53 | void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); | ||
54 | void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); | ||
55 | void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); | ||
56 | int ks_dw_pcie_msi_host_init(struct pcie_port *pp); | ||
57 | int ks_dw_pcie_link_up(struct dw_pcie *pci); | ||
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c new file mode 100644 index 000000000000..3724d3ef7008 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-layerscape.c | |||
@@ -0,0 +1,341 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Freescale Layerscape SoCs | ||
4 | * | ||
5 | * Copyright (C) 2014 Freescale Semiconductor. | ||
6 | * | ||
7 | * Author: Minghuan Lian <Minghuan.Lian@freescale.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/of_pci.h> | ||
14 | #include <linux/of_platform.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/resource.h> | ||
20 | #include <linux/mfd/syscon.h> | ||
21 | #include <linux/regmap.h> | ||
22 | |||
23 | #include "pcie-designware.h" | ||
24 | |||
/* PEX1/2 Misc Ports Status Register */
#define SCFG_PEXMSCPORTSR(pex_idx)	(0x94 + (pex_idx) * 4)
#define LTSSM_STATE_SHIFT	20
#define LTSSM_STATE_MASK	0x3f
#define LTSSM_PCIE_L0		0x11 /* L0 state */

/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_ABSERR		0x8d0 /* Bridge Slave Error Response Register */
#define PCIE_ABSERR_SETTING	0x9401 /* Forward error of non-posted request */

/* Number of outbound iATU windows torn down at host init */
#define PCIE_IATU_NUM		6

/* Per-SoC configuration, selected through the of_device_id match data */
struct ls_pcie_drvdata {
	u32 lut_offset;		/* LUT block offset from dbi_base */
	u32 ltssm_shift;	/* bit position of LTSSM state in the lut_dbg reg */
	u32 lut_dbg;		/* debug register offset inside the LUT */
	const struct dw_pcie_host_ops *ops;
	const struct dw_pcie_ops *dw_pcie_ops;
};

struct ls_pcie {
	struct dw_pcie *pci;
	void __iomem *lut;	/* dbi_base + drvdata->lut_offset */
	struct regmap *scfg;	/* "fsl,pcie-scfg" syscon; set only by ls1021 init */
	const struct ls_pcie_drvdata *drvdata;
	int index;		/* controller index within the scfg block */
};

#define to_ls_pcie(x)	dev_get_drvdata((x)->dev)
55 | |||
56 | static bool ls_pcie_is_bridge(struct ls_pcie *pcie) | ||
57 | { | ||
58 | struct dw_pcie *pci = pcie->pci; | ||
59 | u32 header_type; | ||
60 | |||
61 | header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE); | ||
62 | header_type &= 0x7f; | ||
63 | |||
64 | return header_type == PCI_HEADER_TYPE_BRIDGE; | ||
65 | } | ||
66 | |||
/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	/*
	 * Writing the plain bridge header type clears the multi-function
	 * bit (bit 7) of PCI_HEADER_TYPE.  Requires DBI read-only
	 * registers to be writable (see caller).
	 */
	iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
}
74 | |||
/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
	u32 val;
	struct dw_pcie *pci = pcie->pci;

	val = ioread32(pci->dbi_base + PCIE_STRFMR1);
	/* Clear bit 29 of the Symbol Timer & Filter Mask register */
	val &= 0xDFFFFFFF;
	iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
85 | |||
86 | static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) | ||
87 | { | ||
88 | int i; | ||
89 | |||
90 | for (i = 0; i < PCIE_IATU_NUM; i++) | ||
91 | dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i); | ||
92 | } | ||
93 | |||
94 | static int ls1021_pcie_link_up(struct dw_pcie *pci) | ||
95 | { | ||
96 | u32 state; | ||
97 | struct ls_pcie *pcie = to_ls_pcie(pci); | ||
98 | |||
99 | if (!pcie->scfg) | ||
100 | return 0; | ||
101 | |||
102 | regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); | ||
103 | state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; | ||
104 | |||
105 | if (state < LTSSM_PCIE_L0) | ||
106 | return 0; | ||
107 | |||
108 | return 1; | ||
109 | } | ||
110 | |||
111 | static int ls_pcie_link_up(struct dw_pcie *pci) | ||
112 | { | ||
113 | struct ls_pcie *pcie = to_ls_pcie(pci); | ||
114 | u32 state; | ||
115 | |||
116 | state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> | ||
117 | pcie->drvdata->ltssm_shift) & | ||
118 | LTSSM_STATE_MASK; | ||
119 | |||
120 | if (state < LTSSM_PCIE_L0) | ||
121 | return 0; | ||
122 | |||
123 | return 1; | ||
124 | } | ||
125 | |||
/* Forward error response of outbound non-posted requests */
static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	/* Program the Bridge Slave Error Response register */
	iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
133 | |||
/*
 * Host init common to all Layerscape variants: sanitize bootloader
 * state, apply the SoC fixups, then configure the root complex.
 * Always returns 0.
 */
static int ls_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);

	/*
	 * Disable outbound windows configured by the bootloader to avoid
	 * one transaction hitting multiple outbound windows.
	 * dw_pcie_setup_rc() will reconfigure the outbound windows.
	 */
	ls_pcie_disable_outbound_atus(pcie);
	ls_pcie_fix_error_response(pcie);

	/* Header-type fixup needs the DBI read-only registers writable */
	dw_pcie_dbi_ro_wr_en(pci);
	ls_pcie_clear_multifunction(pcie);
	dw_pcie_dbi_ro_wr_dis(pci);

	ls_pcie_drop_msg_tlp(pcie);

	dw_pcie_setup_rc(pp);

	return 0;
}
157 | |||
/*
 * LS1021A host init: resolve the SCFG syscon (needed by
 * ls1021_pcie_link_up()) before running the common init.  pcie->scfg is
 * left NULL on any failure so link_up() safely reports "down".
 */
static int ls1021_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct ls_pcie *pcie = to_ls_pcie(pci);
	struct device *dev = pci->dev;
	u32 index[2];
	int ret;

	pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
						     "fsl,pcie-scfg");
	if (IS_ERR(pcie->scfg)) {
		ret = PTR_ERR(pcie->scfg);
		dev_err(dev, "No syscfg phandle specified\n");
		pcie->scfg = NULL;
		return ret;
	}

	/* Second cell of "fsl,pcie-scfg" is this controller's index */
	if (of_property_read_u32_array(dev->of_node,
				       "fsl,pcie-scfg", index, 2)) {
		pcie->scfg = NULL;
		return -EINVAL;
	}
	pcie->index = index[1];

	return ls_pcie_host_init(pp);
}
184 | |||
185 | static int ls_pcie_msi_host_init(struct pcie_port *pp) | ||
186 | { | ||
187 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
188 | struct device *dev = pci->dev; | ||
189 | struct device_node *np = dev->of_node; | ||
190 | struct device_node *msi_node; | ||
191 | |||
192 | /* | ||
193 | * The MSI domain is set by the generic of_msi_configure(). This | ||
194 | * .msi_host_init() function keeps us from doing the default MSI | ||
195 | * domain setup in dw_pcie_host_init() and also enforces the | ||
196 | * requirement that "msi-parent" exists. | ||
197 | */ | ||
198 | msi_node = of_parse_phandle(np, "msi-parent", 0); | ||
199 | if (!msi_node) { | ||
200 | dev_err(dev, "failed to find msi-parent\n"); | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
/* LS1021A needs its own host_init to hook up the SCFG syscon */
static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
	.host_init = ls1021_pcie_host_init,
	.msi_host_init = ls_pcie_msi_host_init,
};

static const struct dw_pcie_host_ops ls_pcie_host_ops = {
	.host_init = ls_pcie_host_init,
	.msi_host_init = ls_pcie_msi_host_init,
};

/* LS1021A reads link state from SCFG; other SoCs use the LUT debug reg */
static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
	.link_up = ls1021_pcie_link_up,
};

static const struct dw_pcie_ops dw_ls_pcie_ops = {
	.link_up = ls_pcie_link_up,
};
224 | |||
225 | static struct ls_pcie_drvdata ls1021_drvdata = { | ||
226 | .ops = &ls1021_pcie_host_ops, | ||
227 | .dw_pcie_ops = &dw_ls1021_pcie_ops, | ||
228 | }; | ||
229 | |||
230 | static struct ls_pcie_drvdata ls1043_drvdata = { | ||
231 | .lut_offset = 0x10000, | ||
232 | .ltssm_shift = 24, | ||
233 | .lut_dbg = 0x7fc, | ||
234 | .ops = &ls_pcie_host_ops, | ||
235 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
236 | }; | ||
237 | |||
238 | static struct ls_pcie_drvdata ls1046_drvdata = { | ||
239 | .lut_offset = 0x80000, | ||
240 | .ltssm_shift = 24, | ||
241 | .lut_dbg = 0x407fc, | ||
242 | .ops = &ls_pcie_host_ops, | ||
243 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
244 | }; | ||
245 | |||
246 | static struct ls_pcie_drvdata ls2080_drvdata = { | ||
247 | .lut_offset = 0x80000, | ||
248 | .ltssm_shift = 0, | ||
249 | .lut_dbg = 0x7fc, | ||
250 | .ops = &ls_pcie_host_ops, | ||
251 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
252 | }; | ||
253 | |||
254 | static struct ls_pcie_drvdata ls2088_drvdata = { | ||
255 | .lut_offset = 0x80000, | ||
256 | .ltssm_shift = 0, | ||
257 | .lut_dbg = 0x407fc, | ||
258 | .ops = &ls_pcie_host_ops, | ||
259 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
260 | }; | ||
261 | |||
static const struct of_device_id ls_pcie_of_match[] = {
	/* LS1012A shares the LS1046A configuration */
	{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
	{ .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
	/* LS1088A shares the LS2088A configuration */
	{ .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
	{ },
};
273 | |||
274 | static int __init ls_add_pcie_port(struct ls_pcie *pcie) | ||
275 | { | ||
276 | struct dw_pcie *pci = pcie->pci; | ||
277 | struct pcie_port *pp = &pci->pp; | ||
278 | struct device *dev = pci->dev; | ||
279 | int ret; | ||
280 | |||
281 | pp->ops = pcie->drvdata->ops; | ||
282 | |||
283 | ret = dw_pcie_host_init(pp); | ||
284 | if (ret) { | ||
285 | dev_err(dev, "failed to initialize host\n"); | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | return 0; | ||
290 | } | ||
291 | |||
/*
 * Probe: allocate state, map the "regs" resource (DBI + LUT), verify
 * the controller is in RC mode, then bring up the host bridge.
 */
static int __init ls_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct ls_pcie *pcie;
	struct resource *dbi_base;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	/* Match data is one of the *_drvdata tables above */
	pcie->drvdata = of_device_get_match_data(dev);

	pci->dev = dev;
	pci->ops = pcie->drvdata->dw_pcie_ops;

	pcie->pci = pci;

	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* The LUT sits at a SoC-specific offset inside the same mapping */
	pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;

	/* Controllers strapped for endpoint mode are not handled here */
	if (!ls_pcie_is_bridge(pcie))
		return -ENODEV;

	platform_set_drvdata(pdev, pcie);

	ret = ls_add_pcie_port(pcie);
	if (ret < 0)
		return ret;

	return 0;
}
333 | |||
static struct platform_driver ls_pcie_driver = {
	.driver = {
		.name = "layerscape-pcie",
		.of_match_table = ls_pcie_of_match,
		/* No remove(): the host bridge cannot be unbound */
		.suppress_bind_attrs = true,
	},
};
/* Probe is __init, so register through the _probe helper */
builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c new file mode 100644 index 000000000000..072fd7ecc29f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-armada8k.c | |||
@@ -0,0 +1,282 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Marvell Armada-8K SoCs | ||
4 | * | ||
5 | * Armada-8K PCIe Glue Layer Source Code | ||
6 | * | ||
7 | * Copyright (C) 2016 Marvell Technology Group Ltd. | ||
8 | * | ||
9 | * Author: Yehuda Yitshak <yehuday@marvell.com> | ||
10 | * Author: Shadi Ammouri <shadi@marvell.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/clk.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/phy/phy.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/resource.h> | ||
23 | #include <linux/of_pci.h> | ||
24 | #include <linux/of_irq.h> | ||
25 | |||
26 | #include "pcie-designware.h" | ||
27 | |||
struct armada8k_pcie {
	struct dw_pcie *pci;
	struct clk *clk;	/* mandatory gate clock */
	struct clk *clk_reg;	/* optional "reg" register-access clock */
};

/* Marvell glue registers live past the standard DWC register file */
#define PCIE_VENDOR_REGS_OFFSET		0x8000

#define PCIE_GLOBAL_CONTROL_REG	(PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN	BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT	4
#define PCIE_DEVICE_TYPE_MASK	0xF
#define PCIE_DEVICE_TYPE_RC	0x4 /* Root complex */

#define PCIE_GLOBAL_STATUS_REG	(PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)

#define PCIE_GLOBAL_INT_CAUSE1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x1C)
#define PCIE_GLOBAL_INT_MASK1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK		BIT(9)
#define PCIE_INT_B_ASSERT_MASK		BIT(10)
#define PCIE_INT_C_ASSERT_MASK		BIT(11)
#define PCIE_INT_D_ASSERT_MASK		BIT(12)

/* AXI master attribute registers */
#define PCIE_ARCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x50)
#define PCIE_AWCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x54)
#define PCIE_ARUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
 * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
 * allocate
 */
#define ARCACHE_DEFAULT_VALUE		0x3511
#define AWCACHE_DEFAULT_VALUE		0x5311

#define DOMAIN_OUTER_SHAREABLE	0x2
#define AX_USER_DOMAIN_MASK	0x3
#define AX_USER_DOMAIN_SHIFT	4

#define to_armada8k_pcie(x)	dev_get_drvdata((x)->dev)
69 | |||
70 | static int armada8k_pcie_link_up(struct dw_pcie *pci) | ||
71 | { | ||
72 | u32 reg; | ||
73 | u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; | ||
74 | |||
75 | reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); | ||
76 | |||
77 | if ((reg & mask) == mask) | ||
78 | return 1; | ||
79 | |||
80 | dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
/*
 * Put the port in RC mode, program the AXI attributes, unmask INTA-D,
 * and (re)start link training.  The LTSSM is only stopped/started when
 * the link is not already up, leaving a bootloader-established link
 * undisturbed.
 */
static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 reg;

	if (!dw_pcie_link_up(pci)) {
		/* Disable LTSSM state machine to enable configuration */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg &= ~(PCIE_APP_LTSSM_EN);
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Set the device to root complex mode */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);

	/* Set the PCIe master AxCache attributes */
	dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
	dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);

	/* Set the PCIe master AxDomain attributes */
	reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);

	reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
	dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);

	/* Enable INT A-D interrupts */
	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);

	if (!dw_pcie_link_up(pci)) {
		/* Configuration done. Start LTSSM */
		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
		reg |= PCIE_APP_LTSSM_EN;
		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
	}

	/* Wait until the link becomes active again */
	if (dw_pcie_wait_for_link(pci))
		dev_err(pci->dev, "Link not up after reconfiguration\n");
}
134 | |||
/* DWC .host_init hook: standard RC setup followed by link bring-up. */
static int armada8k_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);

	dw_pcie_setup_rc(pp);
	armada8k_pcie_establish_link(pcie);

	return 0;
}
145 | |||
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
	struct armada8k_pcie *pcie = arg;
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	/*
	 * Interrupts are directly handled by the device driver of the
	 * PCI device. However, they are also latched into the PCIe
	 * controller, so we simply discard them.
	 */
	val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
	/* Write the latched cause bits back to acknowledge them */
	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);

	return IRQ_HANDLED;
}
162 | |||
/* Only .host_init is overridden; everything else uses DWC defaults */
static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
	.host_init = armada8k_pcie_host_init,
};
166 | |||
/*
 * Wire up the port's summary interrupt and hand the port to the DWC
 * core.  The IRQ is requested before dw_pcie_host_init() so latched
 * interrupts can be acked as soon as the port is live.
 */
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
				  struct platform_device *pdev)
{
	struct dw_pcie *pci = pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	/* -1: let the core pick the root bus number */
	pp->root_bus_nr = -1;
	pp->ops = &armada8k_pcie_host_ops;

	pp->irq = platform_get_irq(pdev, 0);
	if (pp->irq < 0) {
		dev_err(dev, "failed to get irq for port\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
			       IRQF_SHARED, "armada8k-pcie", pcie);
	if (ret) {
		dev_err(dev, "failed to request irq %d\n", pp->irq);
		return ret;
	}

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host: %d\n", ret);
		return ret;
	}

	return 0;
}
199 | |||
/* Core callbacks: only a custom link-up query is needed */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = armada8k_pcie_link_up,
};
203 | |||
204 | static int armada8k_pcie_probe(struct platform_device *pdev) | ||
205 | { | ||
206 | struct dw_pcie *pci; | ||
207 | struct armada8k_pcie *pcie; | ||
208 | struct device *dev = &pdev->dev; | ||
209 | struct resource *base; | ||
210 | int ret; | ||
211 | |||
212 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
213 | if (!pcie) | ||
214 | return -ENOMEM; | ||
215 | |||
216 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
217 | if (!pci) | ||
218 | return -ENOMEM; | ||
219 | |||
220 | pci->dev = dev; | ||
221 | pci->ops = &dw_pcie_ops; | ||
222 | |||
223 | pcie->pci = pci; | ||
224 | |||
225 | pcie->clk = devm_clk_get(dev, NULL); | ||
226 | if (IS_ERR(pcie->clk)) | ||
227 | return PTR_ERR(pcie->clk); | ||
228 | |||
229 | ret = clk_prepare_enable(pcie->clk); | ||
230 | if (ret) | ||
231 | return ret; | ||
232 | |||
233 | pcie->clk_reg = devm_clk_get(dev, "reg"); | ||
234 | if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) { | ||
235 | ret = -EPROBE_DEFER; | ||
236 | goto fail; | ||
237 | } | ||
238 | if (!IS_ERR(pcie->clk_reg)) { | ||
239 | ret = clk_prepare_enable(pcie->clk_reg); | ||
240 | if (ret) | ||
241 | goto fail_clkreg; | ||
242 | } | ||
243 | |||
244 | /* Get the dw-pcie unit configuration/control registers base. */ | ||
245 | base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); | ||
246 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); | ||
247 | if (IS_ERR(pci->dbi_base)) { | ||
248 | dev_err(dev, "couldn't remap regs base %p\n", base); | ||
249 | ret = PTR_ERR(pci->dbi_base); | ||
250 | goto fail_clkreg; | ||
251 | } | ||
252 | |||
253 | platform_set_drvdata(pdev, pcie); | ||
254 | |||
255 | ret = armada8k_add_pcie_port(pcie, pdev); | ||
256 | if (ret) | ||
257 | goto fail_clkreg; | ||
258 | |||
259 | return 0; | ||
260 | |||
261 | fail_clkreg: | ||
262 | clk_disable_unprepare(pcie->clk_reg); | ||
263 | fail: | ||
264 | clk_disable_unprepare(pcie->clk); | ||
265 | |||
266 | return ret; | ||
267 | } | ||
268 | |||
static const struct of_device_id armada8k_pcie_of_match[] = {
	{ .compatible = "marvell,armada8k-pcie", },
	{},
};

static struct platform_driver armada8k_pcie_driver = {
	.probe		= armada8k_pcie_probe,
	.driver = {
		.name	= "armada8k-pcie",
		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
		/* No remove(): the host bridge cannot be unbound */
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c new file mode 100644 index 000000000000..321b56cfd5d0 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-artpec6.c | |||
@@ -0,0 +1,618 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Axis ARTPEC-6 SoC | ||
4 | * | ||
5 | * Author: Niklas Cassel <niklas.cassel@axis.com> | ||
6 | * | ||
7 | * Based on work done by Phil Edworthy <phil@edworthys.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/of_device.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/resource.h> | ||
17 | #include <linux/signal.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/mfd/syscon.h> | ||
21 | #include <linux/regmap.h> | ||
22 | |||
23 | #include "pcie-designware.h" | ||
24 | |||
#define to_artpec6_pcie(x)	dev_get_drvdata((x)->dev)

/* Supported SoC generations; selected via of_device_id match data */
enum artpec_pcie_variants {
	ARTPEC6,
	ARTPEC7,
};

struct artpec6_pcie {
	struct dw_pcie *pci;
	struct regmap *regmap;	/* DT axis,syscon-pcie */
	void __iomem *phy_base;	/* DT phy */
	enum artpec_pcie_variants variant;
	enum dw_pcie_device_mode mode;	/* RC or EP */
};

/* Per-compatible data: which SoC and which controller mode */
struct artpec_pcie_of_data {
	enum artpec_pcie_variants variant;
	enum dw_pcie_device_mode mode;
};

/* Defined near the bottom of the file; needed early by probe */
static const struct of_device_id artpec6_pcie_of_match[];

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET			0x700

#define ACK_F_ASPM_CTRL_OFF		(PL_OFFSET + 0xc)
#define ACK_N_FTS_MASK			GENMASK(15, 8)
#define ACK_N_FTS(x)			(((x) << 8) & ACK_N_FTS_MASK)

#define FAST_TRAINING_SEQ_MASK		GENMASK(7, 0)
#define FAST_TRAINING_SEQ(x)		(((x) << 0) & FAST_TRAINING_SEQ_MASK)

/* ARTPEC-6 specific registers (accessed through the syscon regmap) */
#define PCIECFG				0x18
#define  PCIECFG_DBG_OEN		BIT(24)
#define  PCIECFG_CORE_RESET_REQ		BIT(21)
#define  PCIECFG_LTSSM_ENABLE		BIT(20)
#define  PCIECFG_DEVICE_TYPE_MASK	GENMASK(19, 16)
#define  PCIECFG_CLKREQ_B		BIT(11)
#define  PCIECFG_REFCLK_ENABLE		BIT(10)
#define  PCIECFG_PLL_ENABLE		BIT(9)
#define  PCIECFG_PCLK_ENABLE		BIT(8)
#define  PCIECFG_RISRCREN		BIT(4)
#define  PCIECFG_MODE_TX_DRV_EN		BIT(3)
#define  PCIECFG_CISRREN		BIT(2)
#define  PCIECFG_MACRO_ENABLE		BIT(0)
/* ARTPEC-7 specific fields */
#define  PCIECFG_REFCLKSEL		BIT(23)
#define  PCIECFG_NOC_RESET		BIT(3)

#define PCIESTAT			0x1c
/* ARTPEC-7 specific fields */
#define  PCIESTAT_EXTREFCLK		BIT(3)

#define NOCCFG				0x40
#define  NOCCFG_ENABLE_CLK_PCIE		BIT(4)
#define  NOCCFG_POWER_PCIE_IDLEACK	BIT(3)
#define  NOCCFG_POWER_PCIE_IDLE		BIT(2)
#define  NOCCFG_POWER_PCIE_IDLEREQ	BIT(1)

/* PHY registers (accessed through phy_base) */
#define PHY_STATUS			0x118
#define  PHY_COSPLLLOCK			BIT(0)

#define PHY_TX_ASIC_OUT			0x4040
#define  PHY_TX_ASIC_OUT_TX_ACK		BIT(0)

#define PHY_RX_ASIC_OUT			0x405c
#define  PHY_RX_ASIC_OUT_ACK		BIT(0)
93 | |||
94 | static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) | ||
95 | { | ||
96 | u32 val; | ||
97 | |||
98 | regmap_read(artpec6_pcie->regmap, offset, &val); | ||
99 | return val; | ||
100 | } | ||
101 | |||
/* Write one glue register through the axis,syscon-pcie regmap. */
static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
{
	regmap_write(artpec6_pcie->regmap, offset, val);
}
106 | |||
/*
 * dw_pcie_ops.cpu_addr_fixup: convert a CPU address into the address
 * the core should program into the iATU.  The offset subtracted is the
 * RC config-space base (RC mode) or the endpoint's phys_base (EP mode);
 * an unknown mode returns the address unchanged.
 */
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
	struct pcie_port *pp = &pci->pp;
	struct dw_pcie_ep *ep = &pci->ep;

	switch (artpec6_pcie->mode) {
	case DW_PCIE_RC_TYPE:
		return pci_addr - pp->cfg0_base;
	case DW_PCIE_EP_TYPE:
		return pci_addr - ep->phys_base;
	default:
		dev_err(pci->dev, "UNKNOWN device type\n");
	}
	return pci_addr;
}
123 | |||
124 | static int artpec6_pcie_establish_link(struct dw_pcie *pci) | ||
125 | { | ||
126 | struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); | ||
127 | u32 val; | ||
128 | |||
129 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
130 | val |= PCIECFG_LTSSM_ENABLE; | ||
131 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
/* dw_pcie_ops.stop_link: clear LTSSM_ENABLE to halt link training. */
static void artpec6_pcie_stop_link(struct dw_pcie *pci)
{
	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
	u32 val;

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	val &= ~PCIECFG_LTSSM_ENABLE;
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
}
145 | |||
/* Core callbacks shared by RC and EP modes */
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
	.start_link = artpec6_pcie_establish_link,
	.stop_link = artpec6_pcie_stop_link,
};
151 | |||
/*
 * ARTPEC-6: poll (up to 50 x 1-2 ms per step) until the NoC clock
 * manager leaves idle and the PHY PLL locks.  Failures are logged but
 * not propagated.
 */
static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	unsigned int retries;

	retries = 50;
	do {
		usleep_range(1000, 2000);
		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
		retries--;
	} while (retries &&
		 (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
	/* NOTE(review): success on the final iteration still logs an error */
	if (!retries)
		dev_err(dev, "PCIe clock manager did not leave idle state\n");

	retries = 50;
	do {
		usleep_range(1000, 2000);
		val = readl(artpec6_pcie->phy_base + PHY_STATUS);
		retries--;
	} while (retries && !(val & PHY_COSPLLLOCK));
	if (!retries)
		dev_err(dev, "PHY PLL did not lock\n");
}
178 | |||
/*
 * ARTPEC-7: poll until the NoC clock manager leaves idle and both the
 * TX and RX PHY lanes have deasserted their ACKs.  Failures are logged
 * but not propagated.
 */
static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	u16 phy_status_tx, phy_status_rx;
	unsigned int retries;

	retries = 50;
	do {
		usleep_range(1000, 2000);
		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
		retries--;
	} while (retries &&
		 (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
	if (!retries)
		dev_err(dev, "PCIe clock manager did not leave idle state\n");

	retries = 50;
	do {
		usleep_range(1000, 2000);
		phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
		phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
		retries--;
	} while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
			     (phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
	if (!retries)
		dev_err(dev, "PHY did not enter Pn state\n");
}
208 | |||
209 | static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie) | ||
210 | { | ||
211 | switch (artpec6_pcie->variant) { | ||
212 | case ARTPEC6: | ||
213 | artpec6_pcie_wait_for_phy_a6(artpec6_pcie); | ||
214 | break; | ||
215 | case ARTPEC7: | ||
216 | artpec6_pcie_wait_for_phy_a7(artpec6_pcie); | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | |||
/*
 * ARTPEC-6 PHY bring-up: enable terminations and the macro, switch on
 * the reference clock, then the NoC PCIe clock, then pclk + PLL, and
 * finally request the NoC clock manager to leave idle.  The sleeps
 * between steps give each stage time to settle; order is significant.
 */
static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
{
	u32 val;

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	val |= PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
	       PCIECFG_MODE_TX_DRV_EN |
	       PCIECFG_CISRREN |	/* Reference clock term. 100 Ohm */
	       PCIECFG_MACRO_ENABLE;
	val |= PCIECFG_REFCLK_ENABLE;
	val &= ~PCIECFG_DBG_OEN;
	val &= ~PCIECFG_CLKREQ_B;
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
	usleep_range(5000, 6000);

	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val |= NOCCFG_ENABLE_CLK_PCIE;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
	usleep_range(20, 30);

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
	usleep_range(6000, 7000);

	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
}
250 | |||
/*
 * ARTPEC-7 PHY bring-up: pick the internal or external reference clock
 * based on what PCIESTAT reports, enable terminations and pclk, then
 * the NoC PCIe clock, and finally take the NoC clock manager out of
 * idle.  Order and the settle delays are significant.
 */
static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	u32 val;
	bool extrefclk;

	/* Check if external reference clock is connected */
	val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
	extrefclk = !!(val & PCIESTAT_EXTREFCLK);
	dev_dbg(pci->dev, "Using reference clock: %s\n",
		extrefclk ? "external" : "internal");

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	val |= PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
	       PCIECFG_PCLK_ENABLE;
	if (extrefclk)
		val |= PCIECFG_REFCLKSEL;
	else
		val &= ~PCIECFG_REFCLKSEL;
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
	usleep_range(10, 20);

	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val |= NOCCFG_ENABLE_CLK_PCIE;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
	usleep_range(20, 30);

	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
}
282 | |||
283 | static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie) | ||
284 | { | ||
285 | switch (artpec6_pcie->variant) { | ||
286 | case ARTPEC6: | ||
287 | artpec6_pcie_init_phy_a6(artpec6_pcie); | ||
288 | break; | ||
289 | case ARTPEC7: | ||
290 | artpec6_pcie_init_phy_a7(artpec6_pcie); | ||
291 | break; | ||
292 | } | ||
293 | } | ||
294 | |||
/*
 * Program the N_FTS (Number of Fast Training Sequences) values in the
 * DesignWare core.  Only the ARTPEC-7 variant needs this; on ARTPEC-6
 * the function is a no-op.
 */
static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	u32 val;

	if (artpec6_pcie->variant != ARTPEC7)
		return;

	/*
	 * Increase the N_FTS (Number of Fast Training Sequences)
	 * to be transmitted when transitioning from L0s to L0.
	 */
	val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
	val &= ~ACK_N_FTS_MASK;
	val |= ACK_N_FTS(180);
	dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);

	/*
	 * Set the Number of Fast Training Sequences that the core
	 * advertises as its N_FTS during Gen2 or Gen3 link training.
	 */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~FAST_TRAINING_SEQ_MASK;
	val |= FAST_TRAINING_SEQ(180);
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
321 | |||
/*
 * Put the PCIe core into reset.  Note the polarity difference between
 * the variants: ARTPEC-6 asserts reset by setting CORE_RESET_REQ, while
 * on ARTPEC-7 the NOC_RESET bit is active-low (clearing it asserts).
 */
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
	u32 val;

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	switch (artpec6_pcie->variant) {
	case ARTPEC6:
		val |= PCIECFG_CORE_RESET_REQ;
		break;
	case ARTPEC7:
		val &= ~PCIECFG_NOC_RESET;
		break;
	}
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
}
337 | |||
/*
 * Release the PCIe core from reset (mirror of
 * artpec6_pcie_assert_core_reset(), same variant-specific polarity)
 * and give the core a short time to come out of reset.
 */
static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
	u32 val;

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	switch (artpec6_pcie->variant) {
	case ARTPEC6:
		val &= ~PCIECFG_CORE_RESET_REQ;
		break;
	case ARTPEC7:
		val |= PCIECFG_NOC_RESET;
		break;
	}
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
	usleep_range(100, 200);
}
354 | |||
/* Enable host-side interrupts; currently only MSI needs setup. */
static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	struct pcie_port *pp = &pci->pp;

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}
363 | |||
/*
 * DesignWare host-init hook: full controller bring-up sequence for RC
 * mode (reset, PHY init, N_FTS programming, RC setup, link training,
 * interrupt enable).  Always returns 0; the dw_pcie_wait_for_link()
 * result is deliberately ignored so that initialization completes even
 * when no endpoint is connected — NOTE(review): confirm this is the
 * intended behavior rather than an unchecked error.
 */
static int artpec6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);

	artpec6_pcie_assert_core_reset(artpec6_pcie);
	artpec6_pcie_init_phy(artpec6_pcie);
	artpec6_pcie_deassert_core_reset(artpec6_pcie);
	artpec6_pcie_wait_for_phy(artpec6_pcie);
	artpec6_pcie_set_nfts(artpec6_pcie);
	dw_pcie_setup_rc(pp);
	artpec6_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	artpec6_pcie_enable_interrupts(artpec6_pcie);

	return 0;
}
381 | |||
/* Host-mode callbacks registered with the DesignWare core. */
static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
	.host_init = artpec6_pcie_host_init,
};
385 | |||
/*
 * Register the root-complex port with the DesignWare host core.
 * Fetches the "msi" interrupt (when MSI support is compiled in),
 * installs the host ops, and hands over to dw_pcie_host_init().
 *
 * Returns 0 on success or a negative errno.
 */
static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
				 struct platform_device *pdev)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return pp->msi_irq;
		}
	}

	pp->root_bus_nr = -1;	/* let the core assign the root bus number */
	pp->ops = &artpec6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
413 | |||
/*
 * DesignWare endpoint-init hook: same controller bring-up as host mode
 * (reset, PHY, N_FTS), then clear all six BARs so the endpoint starts
 * with a clean configuration.
 */
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
	enum pci_barno bar;

	artpec6_pcie_assert_core_reset(artpec6_pcie);
	artpec6_pcie_init_phy(artpec6_pcie);
	artpec6_pcie_deassert_core_reset(artpec6_pcie);
	artpec6_pcie_wait_for_phy(artpec6_pcie);
	artpec6_pcie_set_nfts(artpec6_pcie);

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}
429 | |||
430 | static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
431 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
432 | { | ||
433 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
434 | |||
435 | switch (type) { | ||
436 | case PCI_EPC_IRQ_LEGACY: | ||
437 | dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); | ||
438 | return -EINVAL; | ||
439 | case PCI_EPC_IRQ_MSI: | ||
440 | return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); | ||
441 | default: | ||
442 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | ||
443 | } | ||
444 | |||
445 | return 0; | ||
446 | } | ||
447 | |||
/* Endpoint-mode callbacks registered with the DesignWare EP core. */
static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = artpec6_pcie_ep_init,
	.raise_irq = artpec6_pcie_raise_irq,
};
452 | |||
/*
 * Register the controller as a PCIe endpoint: map the shadow config
 * space ("dbi2") and the outbound address window ("addr_space"), then
 * initialize the DesignWare endpoint core.
 *
 * Returns 0 on success or a negative errno.
 */
static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
			       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = artpec6_pcie->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	/* devm_ioremap_resource() rejects a NULL resource for us */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}
485 | |||
486 | static int artpec6_pcie_probe(struct platform_device *pdev) | ||
487 | { | ||
488 | struct device *dev = &pdev->dev; | ||
489 | struct dw_pcie *pci; | ||
490 | struct artpec6_pcie *artpec6_pcie; | ||
491 | struct resource *dbi_base; | ||
492 | struct resource *phy_base; | ||
493 | int ret; | ||
494 | const struct of_device_id *match; | ||
495 | const struct artpec_pcie_of_data *data; | ||
496 | enum artpec_pcie_variants variant; | ||
497 | enum dw_pcie_device_mode mode; | ||
498 | |||
499 | match = of_match_device(artpec6_pcie_of_match, dev); | ||
500 | if (!match) | ||
501 | return -EINVAL; | ||
502 | |||
503 | data = (struct artpec_pcie_of_data *)match->data; | ||
504 | variant = (enum artpec_pcie_variants)data->variant; | ||
505 | mode = (enum dw_pcie_device_mode)data->mode; | ||
506 | |||
507 | artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); | ||
508 | if (!artpec6_pcie) | ||
509 | return -ENOMEM; | ||
510 | |||
511 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
512 | if (!pci) | ||
513 | return -ENOMEM; | ||
514 | |||
515 | pci->dev = dev; | ||
516 | pci->ops = &dw_pcie_ops; | ||
517 | |||
518 | artpec6_pcie->pci = pci; | ||
519 | artpec6_pcie->variant = variant; | ||
520 | artpec6_pcie->mode = mode; | ||
521 | |||
522 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
523 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | ||
524 | if (IS_ERR(pci->dbi_base)) | ||
525 | return PTR_ERR(pci->dbi_base); | ||
526 | |||
527 | phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); | ||
528 | artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); | ||
529 | if (IS_ERR(artpec6_pcie->phy_base)) | ||
530 | return PTR_ERR(artpec6_pcie->phy_base); | ||
531 | |||
532 | artpec6_pcie->regmap = | ||
533 | syscon_regmap_lookup_by_phandle(dev->of_node, | ||
534 | "axis,syscon-pcie"); | ||
535 | if (IS_ERR(artpec6_pcie->regmap)) | ||
536 | return PTR_ERR(artpec6_pcie->regmap); | ||
537 | |||
538 | platform_set_drvdata(pdev, artpec6_pcie); | ||
539 | |||
540 | switch (artpec6_pcie->mode) { | ||
541 | case DW_PCIE_RC_TYPE: | ||
542 | if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST)) | ||
543 | return -ENODEV; | ||
544 | |||
545 | ret = artpec6_add_pcie_port(artpec6_pcie, pdev); | ||
546 | if (ret < 0) | ||
547 | return ret; | ||
548 | break; | ||
549 | case DW_PCIE_EP_TYPE: { | ||
550 | u32 val; | ||
551 | |||
552 | if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP)) | ||
553 | return -ENODEV; | ||
554 | |||
555 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
556 | val &= ~PCIECFG_DEVICE_TYPE_MASK; | ||
557 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
558 | ret = artpec6_add_pcie_ep(artpec6_pcie, pdev); | ||
559 | if (ret < 0) | ||
560 | return ret; | ||
561 | break; | ||
562 | } | ||
563 | default: | ||
564 | dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); | ||
565 | } | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
/* Match data: one entry per (SoC variant, controller mode) combination. */
static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
	.variant = ARTPEC6,
	.mode = DW_PCIE_RC_TYPE,
};

static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
	.variant = ARTPEC6,
	.mode = DW_PCIE_EP_TYPE,
};

static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
	.variant = ARTPEC7,
	.mode = DW_PCIE_RC_TYPE,
};

static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
	.variant = ARTPEC7,
	.mode = DW_PCIE_EP_TYPE,
};
589 | |||
/* Device-tree compatibles; .data selects the variant/mode pair. */
static const struct of_device_id artpec6_pcie_of_match[] = {
	{
		.compatible = "axis,artpec6-pcie",
		.data = &artpec6_pcie_rc_of_data,
	},
	{
		.compatible = "axis,artpec6-pcie-ep",
		.data = &artpec6_pcie_ep_of_data,
	},
	{
		.compatible = "axis,artpec7-pcie",
		.data = &artpec7_pcie_rc_of_data,
	},
	{
		.compatible = "axis,artpec7-pcie-ep",
		.data = &artpec7_pcie_ep_of_data,
	},
	{},
};
609 | |||
/*
 * Built-in platform driver; no .remove, so unbinding via sysfs is
 * suppressed with suppress_bind_attrs.
 */
static struct platform_driver artpec6_pcie_driver = {
	.probe = artpec6_pcie_probe,
	.driver = {
		.name = "artpec6-pcie",
		.of_match_table = artpec6_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c new file mode 100644 index 000000000000..1eec4415a77f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c | |||
@@ -0,0 +1,422 @@ | |||
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
8 | |||
9 | #include <linux/of.h> | ||
10 | |||
11 | #include "pcie-designware.h" | ||
12 | #include <linux/pci-epc.h> | ||
13 | #include <linux/pci-epf.h> | ||
14 | |||
15 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) | ||
16 | { | ||
17 | struct pci_epc *epc = ep->epc; | ||
18 | |||
19 | pci_epc_linkup(epc); | ||
20 | } | ||
21 | |||
/*
 * Clear a BAR in both the shadow (dbi2, BAR mask) and the regular (dbi)
 * config space.  For a 64-bit BAR the upper half at reg + 4 is cleared
 * as well.  DBI read-only writes are enabled around the update.
 */
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
				   int flags)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi2(pci, reg, 0x0);
	dw_pcie_writel_dbi(pci, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
		dw_pcie_writel_dbi(pci, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}
37 | |||
/* Public wrapper: reset a BAR treating it as 32-bit (no type flags). */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	__dw_pcie_ep_reset_bar(pci, bar, 0);
}
42 | |||
/*
 * pci_epc_ops.write_header: program the standard config header fields
 * from the endpoint-function description.  DBI read-only writes are
 * enabled around the update.  Always returns 0.
 */
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
	/* sub-class in the low byte, base class in the high byte */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
			   hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
			   hdr->cache_line_size);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
			   hdr->subsys_vendor_id);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
			   hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
67 | |||
/*
 * Claim a free inbound iATU window and program it to translate accesses
 * hitting @bar to @cpu_addr.  On success the window index is recorded
 * in bar_to_atu[] and marked busy in ib_window_map.
 *
 * Returns 0 on success, -EINVAL if no window is free, or the error from
 * dw_pcie_prog_inbound_atu().
 */
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
				  dma_addr_t cpu_addr,
				  enum dw_pcie_as_type as_type)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (free_win >= ep->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
				       as_type);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	ep->bar_to_atu[bar] = free_win;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}
94 | |||
/*
 * Claim a free outbound iATU window and program it to map @phys_addr to
 * @pci_addr for @size bytes (MEM type).  The CPU address is recorded in
 * outbound_addr[] so dw_pcie_find_index() can find the window later.
 *
 * Returns 0 on success or -EINVAL if no window is free.
 */
static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
				   u64 pci_addr, size_t size)
{
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
	if (free_win >= ep->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}

	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
				  phys_addr, pci_addr, size);

	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = phys_addr;

	return 0;
}
115 | |||
/*
 * pci_epc_ops.clear_bar: reset the BAR registers and tear down the
 * inbound iATU window that was set up by dw_pcie_ep_set_bar().
 */
static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar];

	__dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
	clear_bit(atu_index, ep->ib_window_map);
}
129 | |||
/*
 * pci_epc_ops.set_bar: route @bar to the function's backing memory.
 * Programs an inbound iATU window for the BAR's physical address, then
 * writes the BAR mask (size - 1) to the dbi2 shadow registers and the
 * flags to the regular dbi registers; 64-bit BARs also get their upper
 * half programmed.
 *
 * Returns 0 on success or the error from dw_pcie_ep_inbound_atu().
 */
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
			      struct pci_epf_bar *epf_bar)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	enum dw_pcie_as_type as_type;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	/* PCI_BASE_ADDRESS_SPACE bit clear means memory space */
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		as_type = DW_PCIE_AS_MEM;
	else
		as_type = DW_PCIE_AS_IO;

	ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
	dw_pcie_writel_dbi(pci, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
		dw_pcie_writel_dbi(pci, reg + 4, 0);
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
165 | |||
166 | static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, | ||
167 | u32 *atu_index) | ||
168 | { | ||
169 | u32 index; | ||
170 | |||
171 | for (index = 0; index < ep->num_ob_windows; index++) { | ||
172 | if (ep->outbound_addr[index] != addr) | ||
173 | continue; | ||
174 | *atu_index = index; | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | return -EINVAL; | ||
179 | } | ||
180 | |||
/*
 * pci_epc_ops.unmap_addr: disable and release the outbound iATU window
 * that maps @addr.  Silently does nothing if no window matches.
 */
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_find_index(ep, addr, &atu_index);
	if (ret < 0)
		return;

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
	clear_bit(atu_index, ep->ob_window_map);
}
196 | |||
/*
 * pci_epc_ops.map_addr: map CPU address @addr to PCI address @pci_addr
 * through a free outbound iATU window.
 *
 * Returns 0 on success or the error from dw_pcie_ep_outbound_atu().
 */
static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
			       phys_addr_t addr,
			       u64 pci_addr, size_t size)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
	if (ret) {
		dev_err(pci->dev, "Failed to enable address\n");
		return ret;
	}

	return 0;
}
213 | |||
/*
 * pci_epc_ops.get_msi: return the Multiple Message Enable field from
 * the MSI capability, or -EINVAL when MSI is not enabled by the host.
 */
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
	int val;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	if (!(val & MSI_CAP_MSI_EN_MASK))
		return -EINVAL;

	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
	return val;
}
227 | |||
/*
 * pci_epc_ops.set_msi: program the Multiple Message Capable field of
 * the MSI capability with @encode_int (log2 of the number of vectors
 * the function advertises).  Always returns 0.
 */
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
{
	int val;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	val &= ~MSI_CAP_MMC_MASK;
	val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
243 | |||
244 | static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, | ||
245 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
246 | { | ||
247 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
248 | |||
249 | if (!ep->ops->raise_irq) | ||
250 | return -EINVAL; | ||
251 | |||
252 | return ep->ops->raise_irq(ep, func_no, type, interrupt_num); | ||
253 | } | ||
254 | |||
255 | static void dw_pcie_ep_stop(struct pci_epc *epc) | ||
256 | { | ||
257 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
258 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
259 | |||
260 | if (!pci->ops->stop_link) | ||
261 | return; | ||
262 | |||
263 | pci->ops->stop_link(pci); | ||
264 | } | ||
265 | |||
266 | static int dw_pcie_ep_start(struct pci_epc *epc) | ||
267 | { | ||
268 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
269 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
270 | |||
271 | if (!pci->ops->start_link) | ||
272 | return -EINVAL; | ||
273 | |||
274 | return pci->ops->start_link(pci); | ||
275 | } | ||
276 | |||
/* Endpoint-controller operations exposed to the PCI endpoint core. */
static const struct pci_epc_ops epc_ops = {
	.write_header	= dw_pcie_ep_write_header,
	.set_bar	= dw_pcie_ep_set_bar,
	.clear_bar	= dw_pcie_ep_clear_bar,
	.map_addr	= dw_pcie_ep_map_addr,
	.unmap_addr	= dw_pcie_ep_unmap_addr,
	.set_msi	= dw_pcie_ep_set_msi,
	.get_msi	= dw_pcie_ep_get_msi,
	.raise_irq	= dw_pcie_ep_raise_irq,
	.start		= dw_pcie_ep_start,
	.stop		= dw_pcie_ep_stop,
};
289 | |||
/*
 * Raise an MSI toward the host: read the MSI address/data the host
 * programmed into our MSI capability, temporarily map the reserved
 * msi_mem page over that address through an outbound window, and write
 * the message data (ORed with the zero-based vector number) to it.
 *
 * Returns 0 on success or the error from mapping the window.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;
	u16 msg_ctrl, msg_data;
	u32 msg_addr_lower, msg_addr_upper;
	u64 msg_addr;
	bool has_upper;
	int ret;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
	if (has_upper) {
		/* 64-bit capability: data register sits after the upper address */
		msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
	} else {
		msg_addr_upper = 0;
		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
	}
	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
				  epc->mem->page_size);
	if (ret)
		return ret;

	/* interrupt_num is 1-based; the MSI data field is 0-based */
	writel(msg_data | (interrupt_num - 1), ep->msi_mem);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);

	return 0;
}
324 | |||
/*
 * Tear down the endpoint: free the page reserved for raising MSIs and
 * release the endpoint address-space allocator.
 */
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->page_size);

	pci_epc_mem_exit(epc);
}
334 | |||
335 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) | ||
336 | { | ||
337 | int ret; | ||
338 | void *addr; | ||
339 | struct pci_epc *epc; | ||
340 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
341 | struct device *dev = pci->dev; | ||
342 | struct device_node *np = dev->of_node; | ||
343 | |||
344 | if (!pci->dbi_base || !pci->dbi_base2) { | ||
345 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); | ||
346 | return -EINVAL; | ||
347 | } | ||
348 | |||
349 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); | ||
350 | if (ret < 0) { | ||
351 | dev_err(dev, "Unable to read *num-ib-windows* property\n"); | ||
352 | return ret; | ||
353 | } | ||
354 | if (ep->num_ib_windows > MAX_IATU_IN) { | ||
355 | dev_err(dev, "Invalid *num-ib-windows*\n"); | ||
356 | return -EINVAL; | ||
357 | } | ||
358 | |||
359 | ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); | ||
360 | if (ret < 0) { | ||
361 | dev_err(dev, "Unable to read *num-ob-windows* property\n"); | ||
362 | return ret; | ||
363 | } | ||
364 | if (ep->num_ob_windows > MAX_IATU_OUT) { | ||
365 | dev_err(dev, "Invalid *num-ob-windows*\n"); | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | |||
369 | ep->ib_window_map = devm_kzalloc(dev, sizeof(long) * | ||
370 | BITS_TO_LONGS(ep->num_ib_windows), | ||
371 | GFP_KERNEL); | ||
372 | if (!ep->ib_window_map) | ||
373 | return -ENOMEM; | ||
374 | |||
375 | ep->ob_window_map = devm_kzalloc(dev, sizeof(long) * | ||
376 | BITS_TO_LONGS(ep->num_ob_windows), | ||
377 | GFP_KERNEL); | ||
378 | if (!ep->ob_window_map) | ||
379 | return -ENOMEM; | ||
380 | |||
381 | addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows, | ||
382 | GFP_KERNEL); | ||
383 | if (!addr) | ||
384 | return -ENOMEM; | ||
385 | ep->outbound_addr = addr; | ||
386 | |||
387 | if (ep->ops->ep_init) | ||
388 | ep->ops->ep_init(ep); | ||
389 | |||
390 | epc = devm_pci_epc_create(dev, &epc_ops); | ||
391 | if (IS_ERR(epc)) { | ||
392 | dev_err(dev, "Failed to create epc device\n"); | ||
393 | return PTR_ERR(epc); | ||
394 | } | ||
395 | |||
396 | ret = of_property_read_u8(np, "max-functions", &epc->max_functions); | ||
397 | if (ret < 0) | ||
398 | epc->max_functions = 1; | ||
399 | |||
400 | ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, | ||
401 | ep->page_size); | ||
402 | if (ret < 0) { | ||
403 | dev_err(dev, "Failed to initialize address space\n"); | ||
404 | return ret; | ||
405 | } | ||
406 | |||
407 | ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, | ||
408 | epc->mem->page_size); | ||
409 | if (!ep->msi_mem) { | ||
410 | dev_err(dev, "Failed to reserve memory for MSI\n"); | ||
411 | return -ENOMEM; | ||
412 | } | ||
413 | |||
414 | epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER; | ||
415 | EPC_FEATURE_SET_BAR(epc->features, BAR_0); | ||
416 | |||
417 | ep->epc = epc; | ||
418 | epc_set_drvdata(epc, ep); | ||
419 | dw_pcie_setup(pci); | ||
420 | |||
421 | return 0; | ||
422 | } | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c new file mode 100644 index 000000000000..781aa03aeede --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-host.c | |||
@@ -0,0 +1,722 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Synopsys DesignWare PCIe host controller driver | ||
4 | * | ||
5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
6 | * http://www.samsung.com | ||
7 | * | ||
8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/irqchip/chained_irq.h> | ||
12 | #include <linux/irqdomain.h> | ||
13 | #include <linux/of_address.h> | ||
14 | #include <linux/of_pci.h> | ||
15 | #include <linux/pci_regs.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | |||
18 | #include "../../pci.h" | ||
19 | #include "pcie-designware.h" | ||
20 | |||
21 | static struct pci_ops dw_pcie_ops; | ||
22 | |||
23 | static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
24 | u32 *val) | ||
25 | { | ||
26 | struct dw_pcie *pci; | ||
27 | |||
28 | if (pp->ops->rd_own_conf) | ||
29 | return pp->ops->rd_own_conf(pp, where, size, val); | ||
30 | |||
31 | pci = to_dw_pcie_from_pp(pp); | ||
32 | return dw_pcie_read(pci->dbi_base + where, size, val); | ||
33 | } | ||
34 | |||
35 | static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | ||
36 | u32 val) | ||
37 | { | ||
38 | struct dw_pcie *pci; | ||
39 | |||
40 | if (pp->ops->wr_own_conf) | ||
41 | return pp->ops->wr_own_conf(pp, where, size, val); | ||
42 | |||
43 | pci = to_dw_pcie_from_pp(pp); | ||
44 | return dw_pcie_write(pci->dbi_base + where, size, val); | ||
45 | } | ||
46 | |||
/* Ack an MSI by forwarding to the parent (DW bottom-level) irq_chip. */
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

/* Mask at both the PCI MSI layer and the parent controller layer. */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

/* Unmask at both the PCI MSI layer and the parent controller layer. */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

/* Top-level irq_chip for the MSI domain; delegates to the parent chip. */
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

/* MSI domain properties: MSI-X and multi-message MSI are supported. */
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
76 | |||
77 | /* MSI int handler */ | ||
78 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) | ||
79 | { | ||
80 | int i, pos, irq; | ||
81 | u32 val, num_ctrls; | ||
82 | irqreturn_t ret = IRQ_NONE; | ||
83 | |||
84 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | ||
85 | |||
86 | for (i = 0; i < num_ctrls; i++) { | ||
87 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + | ||
88 | (i * MSI_REG_CTRL_BLOCK_SIZE), | ||
89 | 4, &val); | ||
90 | if (!val) | ||
91 | continue; | ||
92 | |||
93 | ret = IRQ_HANDLED; | ||
94 | pos = 0; | ||
95 | while ((pos = find_next_bit((unsigned long *) &val, | ||
96 | MAX_MSI_IRQS_PER_CTRL, | ||
97 | pos)) != MAX_MSI_IRQS_PER_CTRL) { | ||
98 | irq = irq_find_mapping(pp->irq_domain, | ||
99 | (i * MAX_MSI_IRQS_PER_CTRL) + | ||
100 | pos); | ||
101 | generic_handle_irq(irq); | ||
102 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + | ||
103 | (i * MSI_REG_CTRL_BLOCK_SIZE), | ||
104 | 4, 1 << pos); | ||
105 | pos++; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | return ret; | ||
110 | } | ||
111 | |||
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	dw_handle_msi_irq(pp);
	chained_irq_exit(chip, desc);
}
125 | |||
126 | static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
127 | { | ||
128 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
129 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
130 | u64 msi_target; | ||
131 | |||
132 | if (pp->ops->get_msi_addr) | ||
133 | msi_target = pp->ops->get_msi_addr(pp); | ||
134 | else | ||
135 | msi_target = (u64)pp->msi_data; | ||
136 | |||
137 | msg->address_lo = lower_32_bits(msi_target); | ||
138 | msg->address_hi = upper_32_bits(msi_target); | ||
139 | |||
140 | if (pp->ops->get_msi_data) | ||
141 | msg->data = pp->ops->get_msi_data(pp, data->hwirq); | ||
142 | else | ||
143 | msg->data = data->hwirq; | ||
144 | |||
145 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
146 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
147 | } | ||
148 | |||
/* MSI affinity is not configurable on this controller; always refuse. */
static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
154 | |||
155 | static void dw_pci_bottom_mask(struct irq_data *data) | ||
156 | { | ||
157 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
158 | unsigned int res, bit, ctrl; | ||
159 | unsigned long flags; | ||
160 | |||
161 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
162 | |||
163 | if (pp->ops->msi_clear_irq) { | ||
164 | pp->ops->msi_clear_irq(pp, data->hwirq); | ||
165 | } else { | ||
166 | ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
167 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
168 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
169 | |||
170 | pp->irq_status[ctrl] &= ~(1 << bit); | ||
171 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, | ||
172 | pp->irq_status[ctrl]); | ||
173 | } | ||
174 | |||
175 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
176 | } | ||
177 | |||
178 | static void dw_pci_bottom_unmask(struct irq_data *data) | ||
179 | { | ||
180 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
181 | unsigned int res, bit, ctrl; | ||
182 | unsigned long flags; | ||
183 | |||
184 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
185 | |||
186 | if (pp->ops->msi_set_irq) { | ||
187 | pp->ops->msi_set_irq(pp, data->hwirq); | ||
188 | } else { | ||
189 | ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
190 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
191 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
192 | |||
193 | pp->irq_status[ctrl] |= 1 << bit; | ||
194 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, | ||
195 | pp->irq_status[ctrl]); | ||
196 | } | ||
197 | |||
198 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
199 | } | ||
200 | |||
/* Ack a bottom-level MSI via the platform hook, when one is provided. */
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct pcie_port *pp;

	pp = msi_desc_to_pci_sysdata(msi);

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);
}

/* Bottom-level irq_chip: drives the DW MSI controller registers. */
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
220 | |||
221 | static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, | ||
222 | unsigned int virq, unsigned int nr_irqs, | ||
223 | void *args) | ||
224 | { | ||
225 | struct pcie_port *pp = domain->host_data; | ||
226 | unsigned long flags; | ||
227 | u32 i; | ||
228 | int bit; | ||
229 | |||
230 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
231 | |||
232 | bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, | ||
233 | order_base_2(nr_irqs)); | ||
234 | |||
235 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
236 | |||
237 | if (bit < 0) | ||
238 | return -ENOSPC; | ||
239 | |||
240 | for (i = 0; i < nr_irqs; i++) | ||
241 | irq_domain_set_info(domain, virq + i, bit + i, | ||
242 | &dw_pci_msi_bottom_irq_chip, | ||
243 | pp, handle_edge_irq, | ||
244 | NULL, NULL); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
/* Release a previously allocated contiguous range of MSI hwirqs. */
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	/* The region was taken as a power-of-two block; release it the same way. */
	bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
268 | |||
269 | int dw_pcie_allocate_domains(struct pcie_port *pp) | ||
270 | { | ||
271 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
272 | struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); | ||
273 | |||
274 | pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, | ||
275 | &dw_pcie_msi_domain_ops, pp); | ||
276 | if (!pp->irq_domain) { | ||
277 | dev_err(pci->dev, "Failed to create IRQ domain\n"); | ||
278 | return -ENOMEM; | ||
279 | } | ||
280 | |||
281 | pp->msi_domain = pci_msi_create_irq_domain(fwnode, | ||
282 | &dw_pcie_msi_domain_info, | ||
283 | pp->irq_domain); | ||
284 | if (!pp->msi_domain) { | ||
285 | dev_err(pci->dev, "Failed to create MSI domain\n"); | ||
286 | irq_domain_remove(pp->irq_domain); | ||
287 | return -ENOMEM; | ||
288 | } | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
/*
 * Detach the chained MSI handler and tear down both IRQ domains.
 *
 * NOTE(review): the MSI data page mapped in dw_pcie_msi_init() is not
 * unmapped or freed here — presumably leaked on teardown; verify.
 */
void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
301 | |||
302 | void dw_pcie_msi_init(struct pcie_port *pp) | ||
303 | { | ||
304 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
305 | struct device *dev = pci->dev; | ||
306 | struct page *page; | ||
307 | u64 msi_target; | ||
308 | |||
309 | page = alloc_page(GFP_KERNEL); | ||
310 | pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
311 | if (dma_mapping_error(dev, pp->msi_data)) { | ||
312 | dev_err(dev, "Failed to map MSI data\n"); | ||
313 | __free_page(page); | ||
314 | return; | ||
315 | } | ||
316 | msi_target = (u64)pp->msi_data; | ||
317 | |||
318 | /* Program the msi_data */ | ||
319 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, | ||
320 | lower_32_bits(msi_target)); | ||
321 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, | ||
322 | upper_32_bits(msi_target)); | ||
323 | } | ||
324 | |||
325 | int dw_pcie_host_init(struct pcie_port *pp) | ||
326 | { | ||
327 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
328 | struct device *dev = pci->dev; | ||
329 | struct device_node *np = dev->of_node; | ||
330 | struct platform_device *pdev = to_platform_device(dev); | ||
331 | struct resource_entry *win, *tmp; | ||
332 | struct pci_bus *bus, *child; | ||
333 | struct pci_host_bridge *bridge; | ||
334 | struct resource *cfg_res; | ||
335 | int ret; | ||
336 | |||
337 | raw_spin_lock_init(&pci->pp.lock); | ||
338 | |||
339 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); | ||
340 | if (cfg_res) { | ||
341 | pp->cfg0_size = resource_size(cfg_res) >> 1; | ||
342 | pp->cfg1_size = resource_size(cfg_res) >> 1; | ||
343 | pp->cfg0_base = cfg_res->start; | ||
344 | pp->cfg1_base = cfg_res->start + pp->cfg0_size; | ||
345 | } else if (!pp->va_cfg0_base) { | ||
346 | dev_err(dev, "Missing *config* reg space\n"); | ||
347 | } | ||
348 | |||
349 | bridge = pci_alloc_host_bridge(0); | ||
350 | if (!bridge) | ||
351 | return -ENOMEM; | ||
352 | |||
353 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
354 | &bridge->windows, &pp->io_base); | ||
355 | if (ret) | ||
356 | return ret; | ||
357 | |||
358 | ret = devm_request_pci_bus_resources(dev, &bridge->windows); | ||
359 | if (ret) | ||
360 | goto error; | ||
361 | |||
362 | /* Get the I/O and memory ranges from DT */ | ||
363 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { | ||
364 | switch (resource_type(win->res)) { | ||
365 | case IORESOURCE_IO: | ||
366 | ret = pci_remap_iospace(win->res, pp->io_base); | ||
367 | if (ret) { | ||
368 | dev_warn(dev, "Error %d: failed to map resource %pR\n", | ||
369 | ret, win->res); | ||
370 | resource_list_destroy_entry(win); | ||
371 | } else { | ||
372 | pp->io = win->res; | ||
373 | pp->io->name = "I/O"; | ||
374 | pp->io_size = resource_size(pp->io); | ||
375 | pp->io_bus_addr = pp->io->start - win->offset; | ||
376 | } | ||
377 | break; | ||
378 | case IORESOURCE_MEM: | ||
379 | pp->mem = win->res; | ||
380 | pp->mem->name = "MEM"; | ||
381 | pp->mem_size = resource_size(pp->mem); | ||
382 | pp->mem_bus_addr = pp->mem->start - win->offset; | ||
383 | break; | ||
384 | case 0: | ||
385 | pp->cfg = win->res; | ||
386 | pp->cfg0_size = resource_size(pp->cfg) >> 1; | ||
387 | pp->cfg1_size = resource_size(pp->cfg) >> 1; | ||
388 | pp->cfg0_base = pp->cfg->start; | ||
389 | pp->cfg1_base = pp->cfg->start + pp->cfg0_size; | ||
390 | break; | ||
391 | case IORESOURCE_BUS: | ||
392 | pp->busn = win->res; | ||
393 | break; | ||
394 | } | ||
395 | } | ||
396 | |||
397 | if (!pci->dbi_base) { | ||
398 | pci->dbi_base = devm_pci_remap_cfgspace(dev, | ||
399 | pp->cfg->start, | ||
400 | resource_size(pp->cfg)); | ||
401 | if (!pci->dbi_base) { | ||
402 | dev_err(dev, "Error with ioremap\n"); | ||
403 | ret = -ENOMEM; | ||
404 | goto error; | ||
405 | } | ||
406 | } | ||
407 | |||
408 | pp->mem_base = pp->mem->start; | ||
409 | |||
410 | if (!pp->va_cfg0_base) { | ||
411 | pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, | ||
412 | pp->cfg0_base, pp->cfg0_size); | ||
413 | if (!pp->va_cfg0_base) { | ||
414 | dev_err(dev, "Error with ioremap in function\n"); | ||
415 | ret = -ENOMEM; | ||
416 | goto error; | ||
417 | } | ||
418 | } | ||
419 | |||
420 | if (!pp->va_cfg1_base) { | ||
421 | pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, | ||
422 | pp->cfg1_base, | ||
423 | pp->cfg1_size); | ||
424 | if (!pp->va_cfg1_base) { | ||
425 | dev_err(dev, "Error with ioremap\n"); | ||
426 | ret = -ENOMEM; | ||
427 | goto error; | ||
428 | } | ||
429 | } | ||
430 | |||
431 | ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); | ||
432 | if (ret) | ||
433 | pci->num_viewport = 2; | ||
434 | |||
435 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
436 | /* | ||
437 | * If a specific SoC driver needs to change the | ||
438 | * default number of vectors, it needs to implement | ||
439 | * the set_num_vectors callback. | ||
440 | */ | ||
441 | if (!pp->ops->set_num_vectors) { | ||
442 | pp->num_vectors = MSI_DEF_NUM_VECTORS; | ||
443 | } else { | ||
444 | pp->ops->set_num_vectors(pp); | ||
445 | |||
446 | if (pp->num_vectors > MAX_MSI_IRQS || | ||
447 | pp->num_vectors == 0) { | ||
448 | dev_err(dev, | ||
449 | "Invalid number of vectors\n"); | ||
450 | goto error; | ||
451 | } | ||
452 | } | ||
453 | |||
454 | if (!pp->ops->msi_host_init) { | ||
455 | ret = dw_pcie_allocate_domains(pp); | ||
456 | if (ret) | ||
457 | goto error; | ||
458 | |||
459 | if (pp->msi_irq) | ||
460 | irq_set_chained_handler_and_data(pp->msi_irq, | ||
461 | dw_chained_msi_isr, | ||
462 | pp); | ||
463 | } else { | ||
464 | ret = pp->ops->msi_host_init(pp); | ||
465 | if (ret < 0) | ||
466 | goto error; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | if (pp->ops->host_init) { | ||
471 | ret = pp->ops->host_init(pp); | ||
472 | if (ret) | ||
473 | goto error; | ||
474 | } | ||
475 | |||
476 | pp->root_bus_nr = pp->busn->start; | ||
477 | |||
478 | bridge->dev.parent = dev; | ||
479 | bridge->sysdata = pp; | ||
480 | bridge->busnr = pp->root_bus_nr; | ||
481 | bridge->ops = &dw_pcie_ops; | ||
482 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
483 | bridge->swizzle_irq = pci_common_swizzle; | ||
484 | |||
485 | ret = pci_scan_root_bus_bridge(bridge); | ||
486 | if (ret) | ||
487 | goto error; | ||
488 | |||
489 | bus = bridge->bus; | ||
490 | |||
491 | if (pp->ops->scan_bus) | ||
492 | pp->ops->scan_bus(pp); | ||
493 | |||
494 | pci_bus_size_bridges(bus); | ||
495 | pci_bus_assign_resources(bus); | ||
496 | |||
497 | list_for_each_entry(child, &bus->children, node) | ||
498 | pcie_bus_configure_settings(child); | ||
499 | |||
500 | pci_bus_add_devices(bus); | ||
501 | return 0; | ||
502 | |||
503 | error: | ||
504 | pci_free_host_bridge(bridge); | ||
505 | return ret; | ||
506 | } | ||
507 | |||
/*
 * Read config space of a device below the root bus.
 *
 * Reprograms outbound iATU viewport 1 into a CFG0 (immediate child) or CFG1
 * (deeper bus) window targeting the bus/dev/fn, reads through the matching
 * va_cfg mapping, then — when only two viewports exist — restores viewport 1
 * to the I/O window it normally serves.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Platform hook bypasses the ATU entirely. */
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for the root port's immediate child, CFG1 beyond it. */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	/* With <= 2 viewports, viewport 1 is shared with I/O; restore it. */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
546 | |||
/*
 * Write config space of a device below the root bus.
 *
 * Mirror of dw_pcie_rd_other_conf(): retarget iATU viewport 1 at the
 * device's config window, perform the write, then restore the viewport to
 * I/O duty when only two viewports are available.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Platform hook bypasses the ATU entirely. */
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for the root port's immediate child, CFG1 beyond it. */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	/* With <= 2 viewports, viewport 1 is shared with I/O; restore it. */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
585 | |||
586 | static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, | ||
587 | int dev) | ||
588 | { | ||
589 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
590 | |||
591 | /* If there is no link, then there is no device */ | ||
592 | if (bus->number != pp->root_bus_nr) { | ||
593 | if (!dw_pcie_link_up(pci)) | ||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | /* Access only one slot on each root port */ | ||
598 | if (bus->number == pp->root_bus_nr && dev > 0) | ||
599 | return 0; | ||
600 | |||
601 | return 1; | ||
602 | } | ||
603 | |||
604 | static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | ||
605 | int size, u32 *val) | ||
606 | { | ||
607 | struct pcie_port *pp = bus->sysdata; | ||
608 | |||
609 | if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { | ||
610 | *val = 0xffffffff; | ||
611 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
612 | } | ||
613 | |||
614 | if (bus->number == pp->root_bus_nr) | ||
615 | return dw_pcie_rd_own_conf(pp, where, size, val); | ||
616 | |||
617 | return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); | ||
618 | } | ||
619 | |||
620 | static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
621 | int where, int size, u32 val) | ||
622 | { | ||
623 | struct pcie_port *pp = bus->sysdata; | ||
624 | |||
625 | if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) | ||
626 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
627 | |||
628 | if (bus->number == pp->root_bus_nr) | ||
629 | return dw_pcie_wr_own_conf(pp, where, size, val); | ||
630 | |||
631 | return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); | ||
632 | } | ||
633 | |||
/* Config-space accessors installed on the host bridge. */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
638 | |||
639 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | ||
640 | { | ||
641 | u32 val; | ||
642 | |||
643 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | ||
644 | if (val == 0xffffffff) | ||
645 | return 1; | ||
646 | |||
647 | return 0; | ||
648 | } | ||
649 | |||
/*
 * dw_pcie_setup_rc - program the DW core registers for root-complex mode:
 * common core setup, MSI enable snapshot, RC BARs, interrupt pin, bus
 * numbers, command register, outbound iATU windows and a speed-change kick.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &pp->irq_status[ctrl]);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins: set the Interrupt Pin byte to 1 (INTA) */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers: primary 0, secondary 1, subordinate 0xff */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		/* Viewport 0 carries MEM; viewport 2 (when present) carries I/O. */
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Kick a link speed change to negotiate the supported rate. */
	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c new file mode 100644 index 000000000000..5937fed4c938 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c | |||
@@ -0,0 +1,259 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe RC driver for Synopsys DesignWare Core | ||
4 | * | ||
5 | * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) | ||
6 | * | ||
7 | * Authors: Joao Pinto <Joao.Pinto@synopsys.com> | ||
8 | */ | ||
9 | #include <linux/clk.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/gpio.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/of_device.h> | ||
16 | #include <linux/of_gpio.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/resource.h> | ||
20 | #include <linux/signal.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/regmap.h> | ||
23 | |||
24 | #include "pcie-designware.h" | ||
25 | |||
/* Per-device state for the generic DW platform driver. */
struct dw_plat_pcie {
	struct dw_pcie *pci;
	struct regmap *regmap;
	enum dw_pcie_device_mode mode;	/* RC or EP operation */
};

/* Match data: selects RC vs EP behavior per compatible string. */
struct dw_plat_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

/* Forward declaration: probe() needs the table defined near file end. */
static const struct of_device_id dw_plat_pcie_of_match[];
37 | |||
/* RC bring-up: program the root complex, wait for link, init MSI. */
static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup_rc(pp);
	/* The link-wait result is not checked; bring-up continues regardless. */
	dw_pcie_wait_for_link(pci);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

/* Request the maximum number of MSI vectors the DW core supports. */
static void dw_plat_set_num_vectors(struct pcie_port *pp)
{
	pp->num_vectors = MAX_MSI_IRQS;
}

static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
	.host_init = dw_plat_pcie_host_init,
	.set_num_vectors = dw_plat_set_num_vectors,
};
60 | |||
/* No platform-specific action is needed to start the link here. */
static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
{
	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = dw_plat_pcie_establish_link,
};
69 | |||
70 | static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) | ||
71 | { | ||
72 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
73 | enum pci_barno bar; | ||
74 | |||
75 | for (bar = BAR_0; bar <= BAR_5; bar++) | ||
76 | dw_pcie_ep_reset_bar(pci, bar); | ||
77 | } | ||
78 | |||
/* Raise an interrupt towards the host; only MSI is supported. */
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				     enum pci_epc_irq_type type,
				     u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
		return -EINVAL;
	case PCI_EPC_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	default:
		/* NOTE(review): unknown types log an error yet return 0. */
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dw_plat_pcie_ep_init,
	.raise_irq = dw_plat_pcie_ep_raise_irq,
};
102 | |||
/* Acquire IRQs from the platform device and initialize the RC. */
static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
				 struct platform_device *pdev)
{
	struct dw_pcie *pci = dw_plat_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	/* IRQ index 1 is the main interrupt, index 0 the MSI interrupt. */
	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq(pdev, 0);
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	pp->root_bus_nr = -1;	/* dw_pcie_host_init() sets it from the DT bus range */
	pp->ops = &dw_plat_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "Failed to initialize host\n");
		return ret;
	}

	return 0;
}
132 | |||
/* Map EP-mode resources ("dbi2" and "addr_space") and init the endpoint. */
static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
			       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dw_plat_pcie->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	/* A missing "dbi2" res is rejected by devm_ioremap_resource(). */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	/* Outbound address space the endpoint maps host memory through. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize endpoint\n");
		return ret;
	}
	return 0;
}
164 | |||
/* Probe: pick RC vs EP mode from the matched compatible and delegate. */
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_plat_pcie *dw_plat_pcie;
	struct dw_pcie *pci;
	struct resource *res;  /* Resource from DT */
	int ret;
	const struct of_device_id *match;
	const struct dw_plat_pcie_of_data *data;
	enum dw_pcie_device_mode mode;

	match = of_match_device(dw_plat_pcie_of_match, dev);
	if (!match)
		return -EINVAL;

	data = (struct dw_plat_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;

	dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
	if (!dw_plat_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	dw_plat_pcie->pci = pci;
	dw_plat_pcie->mode = mode;

	/* "dbi" register space; fall back to the first MEM resource. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	platform_set_drvdata(pdev, dw_plat_pcie);

	switch (dw_plat_pcie->mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
			return -ENODEV;

		ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
		if (ret < 0)
			return ret;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
			return -ENODEV;

		ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
		if (ret < 0)
			return ret;
		break;
	default:
		/* NOTE(review): invalid mode only logs; probe still returns 0. */
		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
	}

	return 0;
}
230 | |||
/* Match data for the RC ("snps,dw-pcie") compatible. */
static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

/* Match data for the EP ("snps,dw-pcie-ep") compatible. */
static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id dw_plat_pcie_of_match[] = {
	{
		.compatible = "snps,dw-pcie",
		.data = &dw_plat_pcie_rc_of_data,
	},
	{
		.compatible = "snps,dw-pcie-ep",
		.data = &dw_plat_pcie_ep_of_data,
	},
	{},
};
250 | |||
/* Built-in only (no remove hook); manual sysfs binding is suppressed. */
static struct platform_driver dw_plat_pcie_driver = {
	.driver = {
		.name	= "dw-pcie",
		.of_match_table = dw_plat_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = dw_plat_pcie_probe,
};
builtin_platform_driver(dw_plat_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c new file mode 100644 index 000000000000..778c4f76a884 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.c | |||
@@ -0,0 +1,394 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Synopsys DesignWare PCIe host controller driver | ||
4 | * | ||
5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
6 | * http://www.samsung.com | ||
7 | * | ||
8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/delay.h> | ||
12 | #include <linux/of.h> | ||
13 | #include <linux/types.h> | ||
14 | |||
15 | #include "pcie-designware.h" | ||
16 | |||
17 | /* PCIe Port Logic registers */ | ||
18 | #define PLR_OFFSET 0x700 | ||
19 | #define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) | ||
20 | #define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) | ||
21 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) | ||
22 | |||
23 | int dw_pcie_read(void __iomem *addr, int size, u32 *val) | ||
24 | { | ||
25 | if ((uintptr_t)addr & (size - 1)) { | ||
26 | *val = 0; | ||
27 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
28 | } | ||
29 | |||
30 | if (size == 4) { | ||
31 | *val = readl(addr); | ||
32 | } else if (size == 2) { | ||
33 | *val = readw(addr); | ||
34 | } else if (size == 1) { | ||
35 | *val = readb(addr); | ||
36 | } else { | ||
37 | *val = 0; | ||
38 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
39 | } | ||
40 | |||
41 | return PCIBIOS_SUCCESSFUL; | ||
42 | } | ||
43 | |||
44 | int dw_pcie_write(void __iomem *addr, int size, u32 val) | ||
45 | { | ||
46 | if ((uintptr_t)addr & (size - 1)) | ||
47 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
48 | |||
49 | if (size == 4) | ||
50 | writel(val, addr); | ||
51 | else if (size == 2) | ||
52 | writew(val, addr); | ||
53 | else if (size == 1) | ||
54 | writeb(val, addr); | ||
55 | else | ||
56 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
57 | |||
58 | return PCIBIOS_SUCCESSFUL; | ||
59 | } | ||
60 | |||
61 | u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
62 | size_t size) | ||
63 | { | ||
64 | int ret; | ||
65 | u32 val; | ||
66 | |||
67 | if (pci->ops->read_dbi) | ||
68 | return pci->ops->read_dbi(pci, base, reg, size); | ||
69 | |||
70 | ret = dw_pcie_read(base + reg, size, &val); | ||
71 | if (ret) | ||
72 | dev_err(pci->dev, "Read DBI address failed\n"); | ||
73 | |||
74 | return val; | ||
75 | } | ||
76 | |||
77 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
78 | size_t size, u32 val) | ||
79 | { | ||
80 | int ret; | ||
81 | |||
82 | if (pci->ops->write_dbi) { | ||
83 | pci->ops->write_dbi(pci, base, reg, size, val); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | ret = dw_pcie_write(base + reg, size, val); | ||
88 | if (ret) | ||
89 | dev_err(pci->dev, "Write DBI address failed\n"); | ||
90 | } | ||
91 | |||
92 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) | ||
93 | { | ||
94 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | ||
95 | |||
96 | return dw_pcie_readl_dbi(pci, offset + reg); | ||
97 | } | ||
98 | |||
99 | static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, | ||
100 | u32 val) | ||
101 | { | ||
102 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | ||
103 | |||
104 | dw_pcie_writel_dbi(pci, offset + reg, val); | ||
105 | } | ||
106 | |||
/*
 * Program an outbound iATU region using the unrolled (viewport-less)
 * register layout.  Maps CPU addresses [cpu_addr, cpu_addr + size - 1]
 * to @pci_addr with TLP type @type, then polls CTRL2 until the hardware
 * reports the region enabled.
 */
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	/*
	 * NOTE(review): only the low 32 bits of the limit are programmed,
	 * so a region must not cross a 4 GiB boundary -- confirm against
	 * the databook for this core version.
	 */
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	/* Enable last, after all address/type registers are in place. */
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
142 | |||
/*
 * Program outbound iATU region @index: CPU window [cpu_addr,
 * cpu_addr + size - 1] -> @pci_addr with TLP type @type.  Applies the
 * platform's cpu_addr_fixup hook first, then dispatches to the
 * unrolled layout on newer cores or programs via the shared viewport
 * register on older ones.
 */
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	/* Let the platform translate the CPU address if it needs to. */
	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	/* Select the region; all following writes go through the viewport. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	/* Enable last, after all address/type registers are in place. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
185 | |||
186 | static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) | ||
187 | { | ||
188 | u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); | ||
189 | |||
190 | return dw_pcie_readl_dbi(pci, offset + reg); | ||
191 | } | ||
192 | |||
193 | static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, | ||
194 | u32 val) | ||
195 | { | ||
196 | u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); | ||
197 | |||
198 | dw_pcie_writel_dbi(pci, offset + reg, val); | ||
199 | } | ||
200 | |||
/*
 * Program an inbound iATU region in BAR-match mode using the unrolled
 * register layout: accesses hitting BAR @bar are translated to
 * @cpu_addr.  Returns 0 on success, -EINVAL for an unknown address
 * space type, -EBUSY if the region never reports itself enabled.
 */
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	/*
	 * NOTE(review): the target registers are written before @as_type
	 * is validated, so the -EINVAL path leaves the region partially
	 * programmed (though still disabled).
	 */
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	/* BAR-match mode: (bar << 8) selects which BAR the region matches. */
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}
245 | |||
/*
 * Program inbound iATU region @index in BAR-match mode: accesses
 * hitting BAR @bar are translated to @cpu_addr.  Dispatches to the
 * unrolled layout on newer cores, otherwise programs through the
 * shared viewport register.  Returns 0 on success, -EINVAL for an
 * unknown address space type, -EBUSY on enable timeout.
 */
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	/* Select the region; all following writes go through the viewport. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	/* BAR-match mode: (bar << 8) selects which BAR the region matches. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}
291 | |||
292 | void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | ||
293 | enum dw_pcie_region_type type) | ||
294 | { | ||
295 | int region; | ||
296 | |||
297 | switch (type) { | ||
298 | case DW_PCIE_REGION_INBOUND: | ||
299 | region = PCIE_ATU_REGION_INBOUND; | ||
300 | break; | ||
301 | case DW_PCIE_REGION_OUTBOUND: | ||
302 | region = PCIE_ATU_REGION_OUTBOUND; | ||
303 | break; | ||
304 | default: | ||
305 | return; | ||
306 | } | ||
307 | |||
308 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); | ||
309 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); | ||
310 | } | ||
311 | |||
312 | int dw_pcie_wait_for_link(struct dw_pcie *pci) | ||
313 | { | ||
314 | int retries; | ||
315 | |||
316 | /* Check if the link is up or not */ | ||
317 | for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { | ||
318 | if (dw_pcie_link_up(pci)) { | ||
319 | dev_info(pci->dev, "Link up\n"); | ||
320 | return 0; | ||
321 | } | ||
322 | usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); | ||
323 | } | ||
324 | |||
325 | dev_err(pci->dev, "Phy link never came up\n"); | ||
326 | |||
327 | return -ETIMEDOUT; | ||
328 | } | ||
329 | |||
330 | int dw_pcie_link_up(struct dw_pcie *pci) | ||
331 | { | ||
332 | u32 val; | ||
333 | |||
334 | if (pci->ops->link_up) | ||
335 | return pci->ops->link_up(pci); | ||
336 | |||
337 | val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); | ||
338 | return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && | ||
339 | (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); | ||
340 | } | ||
341 | |||
342 | void dw_pcie_setup(struct dw_pcie *pci) | ||
343 | { | ||
344 | int ret; | ||
345 | u32 val; | ||
346 | u32 lanes; | ||
347 | struct device *dev = pci->dev; | ||
348 | struct device_node *np = dev->of_node; | ||
349 | |||
350 | ret = of_property_read_u32(np, "num-lanes", &lanes); | ||
351 | if (ret) | ||
352 | lanes = 0; | ||
353 | |||
354 | /* Set the number of lanes */ | ||
355 | val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); | ||
356 | val &= ~PORT_LINK_MODE_MASK; | ||
357 | switch (lanes) { | ||
358 | case 1: | ||
359 | val |= PORT_LINK_MODE_1_LANES; | ||
360 | break; | ||
361 | case 2: | ||
362 | val |= PORT_LINK_MODE_2_LANES; | ||
363 | break; | ||
364 | case 4: | ||
365 | val |= PORT_LINK_MODE_4_LANES; | ||
366 | break; | ||
367 | case 8: | ||
368 | val |= PORT_LINK_MODE_8_LANES; | ||
369 | break; | ||
370 | default: | ||
371 | dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes); | ||
372 | return; | ||
373 | } | ||
374 | dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); | ||
375 | |||
376 | /* Set link width speed control register */ | ||
377 | val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
378 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; | ||
379 | switch (lanes) { | ||
380 | case 1: | ||
381 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; | ||
382 | break; | ||
383 | case 2: | ||
384 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; | ||
385 | break; | ||
386 | case 4: | ||
387 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; | ||
388 | break; | ||
389 | case 8: | ||
390 | val |= PORT_LOGIC_LINK_WIDTH_8_LANES; | ||
391 | break; | ||
392 | } | ||
393 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); | ||
394 | } | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h new file mode 100644 index 000000000000..bee4e2535a61 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.h | |||
@@ -0,0 +1,387 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Synopsys DesignWare PCIe host controller driver | ||
4 | * | ||
5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
6 | * http://www.samsung.com | ||
7 | * | ||
8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
9 | */ | ||
10 | |||
11 | #ifndef _PCIE_DESIGNWARE_H | ||
12 | #define _PCIE_DESIGNWARE_H | ||
13 | |||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/msi.h> | ||
17 | #include <linux/pci.h> | ||
18 | |||
19 | #include <linux/pci-epc.h> | ||
20 | #include <linux/pci-epf.h> | ||
21 | |||
22 | /* Parameters for the waiting for link up routine */ | ||
23 | #define LINK_WAIT_MAX_RETRIES 10 | ||
24 | #define LINK_WAIT_USLEEP_MIN 90000 | ||
25 | #define LINK_WAIT_USLEEP_MAX 100000 | ||
26 | |||
27 | /* Parameters for the waiting for iATU enabled routine */ | ||
28 | #define LINK_WAIT_MAX_IATU_RETRIES 5 | ||
29 | #define LINK_WAIT_IATU_MIN 9000 | ||
30 | #define LINK_WAIT_IATU_MAX 10000 | ||
31 | |||
32 | /* Synopsys-specific PCIe configuration registers */ | ||
33 | #define PCIE_PORT_LINK_CONTROL 0x710 | ||
34 | #define PORT_LINK_MODE_MASK (0x3f << 16) | ||
35 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) | ||
36 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) | ||
37 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) | ||
38 | #define PORT_LINK_MODE_8_LANES (0xf << 16) | ||
39 | |||
40 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | ||
41 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | ||
42 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) | ||
43 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) | ||
44 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) | ||
45 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) | ||
46 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) | ||
47 | |||
48 | #define PCIE_MSI_ADDR_LO 0x820 | ||
49 | #define PCIE_MSI_ADDR_HI 0x824 | ||
50 | #define PCIE_MSI_INTR0_ENABLE 0x828 | ||
51 | #define PCIE_MSI_INTR0_MASK 0x82C | ||
52 | #define PCIE_MSI_INTR0_STATUS 0x830 | ||
53 | |||
54 | #define PCIE_ATU_VIEWPORT 0x900 | ||
55 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) | ||
56 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) | ||
57 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) | ||
58 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) | ||
59 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) | ||
60 | #define PCIE_ATU_CR1 0x904 | ||
61 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) | ||
62 | #define PCIE_ATU_TYPE_IO (0x2 << 0) | ||
63 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) | ||
64 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) | ||
65 | #define PCIE_ATU_CR2 0x908 | ||
66 | #define PCIE_ATU_ENABLE (0x1 << 31) | ||
67 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) | ||
68 | #define PCIE_ATU_LOWER_BASE 0x90C | ||
69 | #define PCIE_ATU_UPPER_BASE 0x910 | ||
70 | #define PCIE_ATU_LIMIT 0x914 | ||
71 | #define PCIE_ATU_LOWER_TARGET 0x918 | ||
72 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) | ||
73 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) | ||
74 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) | ||
75 | #define PCIE_ATU_UPPER_TARGET 0x91C | ||
76 | |||
77 | #define PCIE_MISC_CONTROL_1_OFF 0x8BC | ||
78 | #define PCIE_DBI_RO_WR_EN (0x1 << 0) | ||
79 | |||
80 | /* | ||
81 | * iATU Unroll-specific register definitions | ||
82 | * From 4.80 core version the address translation will be made by unroll | ||
83 | */ | ||
84 | #define PCIE_ATU_UNR_REGION_CTRL1 0x00 | ||
85 | #define PCIE_ATU_UNR_REGION_CTRL2 0x04 | ||
86 | #define PCIE_ATU_UNR_LOWER_BASE 0x08 | ||
87 | #define PCIE_ATU_UNR_UPPER_BASE 0x0C | ||
88 | #define PCIE_ATU_UNR_LIMIT 0x10 | ||
89 | #define PCIE_ATU_UNR_LOWER_TARGET 0x14 | ||
90 | #define PCIE_ATU_UNR_UPPER_TARGET 0x18 | ||
91 | |||
92 | /* Register address builder */ | ||
93 | #define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ | ||
94 | ((0x3 << 20) | ((region) << 9)) | ||
95 | |||
96 | #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ | ||
97 | ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) | ||
98 | |||
99 | #define MSI_MESSAGE_CONTROL 0x52 | ||
100 | #define MSI_CAP_MMC_SHIFT 1 | ||
101 | #define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT) | ||
102 | #define MSI_CAP_MME_SHIFT 4 | ||
103 | #define MSI_CAP_MSI_EN_MASK 0x1 | ||
104 | #define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) | ||
105 | #define MSI_MESSAGE_ADDR_L32 0x54 | ||
106 | #define MSI_MESSAGE_ADDR_U32 0x58 | ||
107 | #define MSI_MESSAGE_DATA_32 0x58 | ||
108 | #define MSI_MESSAGE_DATA_64 0x5C | ||
109 | |||
110 | #define MAX_MSI_IRQS 256 | ||
111 | #define MAX_MSI_IRQS_PER_CTRL 32 | ||
112 | #define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) | ||
113 | #define MSI_REG_CTRL_BLOCK_SIZE 12 | ||
114 | #define MSI_DEF_NUM_VECTORS 32 | ||
115 | |||
116 | /* Maximum number of inbound/outbound iATUs */ | ||
117 | #define MAX_IATU_IN 256 | ||
118 | #define MAX_IATU_OUT 256 | ||
119 | |||
120 | struct pcie_port; | ||
121 | struct dw_pcie; | ||
122 | struct dw_pcie_ep; | ||
123 | |||
/* Direction of an iATU translation region. */
enum dw_pcie_region_type {
	DW_PCIE_REGION_UNKNOWN,
	DW_PCIE_REGION_INBOUND,
	DW_PCIE_REGION_OUTBOUND,
};

/* Operating mode of the controller (Root Complex vs. Endpoint). */
enum dw_pcie_device_mode {
	DW_PCIE_UNKNOWN_TYPE,
	DW_PCIE_EP_TYPE,
	DW_PCIE_LEG_EP_TYPE,	/* legacy endpoint */
	DW_PCIE_RC_TYPE,
};
136 | |||
/*
 * Optional per-platform hooks for the host (RC) side.  They are invoked
 * by the DesignWare host core (pcie-designware-host.c, not part of this
 * file); any hook left NULL falls back to the generic behaviour there.
 */
struct dw_pcie_host_ops {
	/* Config accesses to the RC's own space / other devices. */
	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
	int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
	int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 *val);
	int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 val);
	int (*host_init)(struct pcie_port *pp);
	/* MSI plumbing overrides (platform-specific MSI controllers). */
	void (*msi_set_irq)(struct pcie_port *pp, int irq);
	void (*msi_clear_irq)(struct pcie_port *pp, int irq);
	phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
	u32 (*get_msi_data)(struct pcie_port *pp, int pos);
	void (*scan_bus)(struct pcie_port *pp);
	void (*set_num_vectors)(struct pcie_port *pp);
	int (*msi_host_init)(struct pcie_port *pp);
	void (*msi_irq_ack)(int irq, struct pcie_port *pp);
};
154 | |||
/*
 * Host (RC) side state.  Populated by the DesignWare host core and the
 * per-SoC glue drivers (not visible in this header), so comments stick
 * to what the declarations themselves show.
 */
struct pcie_port {
	u8			root_bus_nr;
	/* Two config-space windows: bus address, mapping, size each. */
	u64			cfg0_base;
	void __iomem		*va_cfg0_base;
	u32			cfg0_size;
	u64			cfg1_base;
	void __iomem		*va_cfg1_base;
	u32			cfg1_size;
	/* I/O and memory apertures handed to the PCI core. */
	resource_size_t		io_base;
	phys_addr_t		io_bus_addr;
	u32			io_size;
	u64			mem_base;
	phys_addr_t		mem_bus_addr;
	u32			mem_size;
	struct resource		*cfg;
	struct resource		*io;
	struct resource		*mem;
	struct resource		*busn;
	int			irq;
	const struct dw_pcie_host_ops *ops;
	/*
	 * MSI controller state: IRQ domains, message address (msi_data)
	 * and per-controller status/allocation bitmaps.  Presumably
	 * serialized by @lock -- confirm in pcie-designware-host.c.
	 */
	int			msi_irq;
	struct irq_domain	*irq_domain;
	struct irq_domain	*msi_domain;
	dma_addr_t		msi_data;
	u32			num_vectors;
	u32			irq_status[MAX_MSI_CTRLS];
	raw_spinlock_t		lock;
	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
184 | |||
/* Address-space type for inbound translations (memory vs. I/O). */
enum dw_pcie_as_type {
	DW_PCIE_AS_UNKNOWN,
	DW_PCIE_AS_MEM,
	DW_PCIE_AS_IO,
};

/* Hooks for endpoint-mode glue drivers (called from the EP core). */
struct dw_pcie_ep_ops {
	void	(*ep_init)(struct dw_pcie_ep *ep);
	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
			     enum pci_epc_irq_type type, u8 interrupt_num);
};
196 | |||
/*
 * Endpoint-mode state.  Managed by the EP core (pcie-designware-ep.c,
 * not in this header); comments stay with what the declarations show.
 */
struct dw_pcie_ep {
	struct pci_epc		*epc;
	struct dw_pcie_ep_ops	*ops;
	phys_addr_t		phys_base;
	size_t			addr_size;
	size_t			page_size;
	u8			bar_to_atu[6];	/* iATU index used per BAR */
	phys_addr_t		*outbound_addr;
	/* Allocation bitmaps for the in/outbound iATU windows. */
	unsigned long		*ib_window_map;
	unsigned long		*ob_window_map;
	u32			num_ib_windows;
	u32			num_ob_windows;
	/* Scratch mapping used for raising MSIs -- see EP core. */
	void __iomem		*msi_mem;
	phys_addr_t		msi_mem_phys;
};
212 | |||
/*
 * Low-level operations a platform can override.  In this file,
 * cpu_addr_fixup is applied before outbound iATU programming,
 * read_dbi/write_dbi replace the default DBI accessors, and link_up
 * replaces the default PHY debug-register check; start_link/stop_link
 * are used by the host/EP cores elsewhere.
 */
struct dw_pcie_ops {
	u64	(*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
	u32	(*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
			    size_t size);
	void	(*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
			     size_t size, u32 val);
	int	(*link_up)(struct dw_pcie *pcie);
	int	(*start_link)(struct dw_pcie *pcie);
	void	(*stop_link)(struct dw_pcie *pcie);
};

/* Core controller state shared by the RC (@pp) and EP (@ep) modes. */
struct dw_pcie {
	struct device		*dev;
	void __iomem		*dbi_base;	/* primary DBI space */
	void __iomem		*dbi_base2;	/* shadow DBI space (dbi2 accessors) */
	u32			num_viewport;
	u8			iatu_unroll_enabled;	/* per-region iATU layout (>= 4.80) */
	struct pcie_port	pp;
	struct dw_pcie_ep	ep;
	const struct dw_pcie_ops *ops;
};
234 | |||
235 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) | ||
236 | |||
237 | #define to_dw_pcie_from_ep(endpoint) \ | ||
238 | container_of((endpoint), struct dw_pcie, ep) | ||
239 | |||
240 | int dw_pcie_read(void __iomem *addr, int size, u32 *val); | ||
241 | int dw_pcie_write(void __iomem *addr, int size, u32 val); | ||
242 | |||
243 | u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
244 | size_t size); | ||
245 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
246 | size_t size, u32 val); | ||
247 | int dw_pcie_link_up(struct dw_pcie *pci); | ||
248 | int dw_pcie_wait_for_link(struct dw_pcie *pci); | ||
249 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, | ||
250 | int type, u64 cpu_addr, u64 pci_addr, | ||
251 | u32 size); | ||
252 | int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | ||
253 | u64 cpu_addr, enum dw_pcie_as_type as_type); | ||
254 | void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | ||
255 | enum dw_pcie_region_type type); | ||
256 | void dw_pcie_setup(struct dw_pcie *pci); | ||
257 | |||
/* 32-bit write to the primary DBI space. */
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
	__dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
}

/* 32-bit read from the primary DBI space. */
static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
{
	return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
}

/* 16-bit write to the primary DBI space. */
static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
{
	__dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
}

/* 16-bit read from the primary DBI space. */
static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
{
	return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
}

/* 8-bit write to the primary DBI space. */
static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
{
	__dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
}

/* 8-bit read from the primary DBI space. */
static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
{
	return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
}

/* 32-bit write to the shadow (dbi2) space. */
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
	__dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
}

/* 32-bit read from the shadow (dbi2) space. */
static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
{
	return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
}
297 | |||
298 | static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) | ||
299 | { | ||
300 | u32 reg; | ||
301 | u32 val; | ||
302 | |||
303 | reg = PCIE_MISC_CONTROL_1_OFF; | ||
304 | val = dw_pcie_readl_dbi(pci, reg); | ||
305 | val |= PCIE_DBI_RO_WR_EN; | ||
306 | dw_pcie_writel_dbi(pci, reg, val); | ||
307 | } | ||
308 | |||
309 | static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) | ||
310 | { | ||
311 | u32 reg; | ||
312 | u32 val; | ||
313 | |||
314 | reg = PCIE_MISC_CONTROL_1_OFF; | ||
315 | val = dw_pcie_readl_dbi(pci, reg); | ||
316 | val &= ~PCIE_DBI_RO_WR_EN; | ||
317 | dw_pcie_writel_dbi(pci, reg, val); | ||
318 | } | ||
319 | |||
320 | #ifdef CONFIG_PCIE_DW_HOST | ||
321 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); | ||
322 | void dw_pcie_msi_init(struct pcie_port *pp); | ||
323 | void dw_pcie_free_msi(struct pcie_port *pp); | ||
324 | void dw_pcie_setup_rc(struct pcie_port *pp); | ||
325 | int dw_pcie_host_init(struct pcie_port *pp); | ||
326 | int dw_pcie_allocate_domains(struct pcie_port *pp); | ||
327 | #else | ||
/* No-op stubs used when host (RC) support is compiled out. */
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	return IRQ_NONE;
}

static inline void dw_pcie_msi_init(struct pcie_port *pp)
{
}

static inline void dw_pcie_free_msi(struct pcie_port *pp)
{
}

static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}

/* Report success so callers need no CONFIG_PCIE_DW_HOST conditionals. */
static inline int dw_pcie_host_init(struct pcie_port *pp)
{
	return 0;
}

static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	return 0;
}
354 | #endif | ||
355 | |||
356 | #ifdef CONFIG_PCIE_DW_EP | ||
357 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); | ||
358 | int dw_pcie_ep_init(struct dw_pcie_ep *ep); | ||
359 | void dw_pcie_ep_exit(struct dw_pcie_ep *ep); | ||
360 | int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
361 | u8 interrupt_num); | ||
362 | void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); | ||
363 | #else | ||
/* No-op stubs used when endpoint (EP) support is compiled out. */
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}

/* Report success so callers need no CONFIG_PCIE_DW_EP conditionals. */
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	return 0;
}

static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}

static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
					   u8 interrupt_num)
{
	return 0;
}

static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
386 | #endif | ||
387 | #endif /* _PCIE_DESIGNWARE_H */ | ||
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c new file mode 100644 index 000000000000..6d9e1b2b8f7b --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-hisi.c | |||
@@ -0,0 +1,398 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for HiSilicon SoCs | ||
4 | * | ||
5 | * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com | ||
6 | * | ||
7 | * Authors: Zhou Wang <wangzhou1@hisilicon.com> | ||
8 | * Dacai Zhu <zhudacai@hisilicon.com> | ||
9 | * Gabriele Paoloni <gabriele.paoloni@huawei.com> | ||
10 | */ | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/mfd/syscon.h> | ||
14 | #include <linux/of_address.h> | ||
15 | #include <linux/of_pci.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/pci-acpi.h> | ||
20 | #include <linux/pci-ecam.h> | ||
21 | #include <linux/regmap.h> | ||
22 | #include "../../pci.h" | ||
23 | |||
24 | #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) | ||
25 | |||
26 | static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | ||
27 | int size, u32 *val) | ||
28 | { | ||
29 | struct pci_config_window *cfg = bus->sysdata; | ||
30 | int dev = PCI_SLOT(devfn); | ||
31 | |||
32 | if (bus->number == cfg->busr.start) { | ||
33 | /* access only one slot on each root port */ | ||
34 | if (dev > 0) | ||
35 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
36 | else | ||
37 | return pci_generic_config_read32(bus, devfn, where, | ||
38 | size, val); | ||
39 | } | ||
40 | |||
41 | return pci_generic_config_read(bus, devfn, where, size, val); | ||
42 | } | ||
43 | |||
44 | static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
45 | int where, int size, u32 val) | ||
46 | { | ||
47 | struct pci_config_window *cfg = bus->sysdata; | ||
48 | int dev = PCI_SLOT(devfn); | ||
49 | |||
50 | if (bus->number == cfg->busr.start) { | ||
51 | /* access only one slot on each root port */ | ||
52 | if (dev > 0) | ||
53 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
54 | else | ||
55 | return pci_generic_config_write32(bus, devfn, where, | ||
56 | size, val); | ||
57 | } | ||
58 | |||
59 | return pci_generic_config_write(bus, devfn, where, size, val); | ||
60 | } | ||
61 | |||
62 | static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | ||
63 | int where) | ||
64 | { | ||
65 | struct pci_config_window *cfg = bus->sysdata; | ||
66 | void __iomem *reg_base = cfg->priv; | ||
67 | |||
68 | if (bus->number == cfg->busr.start) | ||
69 | return reg_base + where; | ||
70 | else | ||
71 | return pci_ecam_map_bus(bus, devfn, where); | ||
72 | } | ||
73 | |||
74 | #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) | ||
75 | |||
76 | static int hisi_pcie_init(struct pci_config_window *cfg) | ||
77 | { | ||
78 | struct device *dev = cfg->parent; | ||
79 | struct acpi_device *adev = to_acpi_device(dev); | ||
80 | struct acpi_pci_root *root = acpi_driver_data(adev); | ||
81 | struct resource *res; | ||
82 | void __iomem *reg_base; | ||
83 | int ret; | ||
84 | |||
85 | /* | ||
86 | * Retrieve RC base and size from a HISI0081 device with _UID | ||
87 | * matching our segment. | ||
88 | */ | ||
89 | res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); | ||
90 | if (!res) | ||
91 | return -ENOMEM; | ||
92 | |||
93 | ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); | ||
94 | if (ret) { | ||
95 | dev_err(dev, "can't get rc base address\n"); | ||
96 | return -ENOMEM; | ||
97 | } | ||
98 | |||
99 | reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); | ||
100 | if (!reg_base) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | cfg->priv = reg_base; | ||
104 | return 0; | ||
105 | } | ||
106 | |||
/*
 * Almost-ECAM ops for the ACPI quirk path: standard ECAM below the root
 * port, RC-register window (cfg->priv) on the root bus itself.
 */
struct pci_ecam_ops hisi_pcie_ops = {
	.bus_shift    = 20,	/* 1 MiB of config space per bus */
	.init         =  hisi_pcie_init,
	.pci_ops      = {
		.map_bus    = hisi_pcie_map_bus,
		.read       = hisi_pcie_rd_conf,
		.write      = hisi_pcie_wr_conf,
	}
};
116 | |||
117 | #endif | ||
118 | |||
119 | #ifdef CONFIG_PCI_HISI | ||
120 | |||
121 | #include "pcie-designware.h" | ||
122 | |||
123 | #define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 | ||
124 | #define PCIE_HIP06_CTRL_OFF 0x1000 | ||
125 | #define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) | ||
126 | #define PCIE_LTSSM_LINKUP_STATE 0x11 | ||
127 | #define PCIE_LTSSM_STATE_MASK 0x3F | ||
128 | |||
129 | #define to_hisi_pcie(x) dev_get_drvdata((x)->dev) | ||
130 | |||
131 | struct hisi_pcie; | ||
132 | |||
133 | struct pcie_soc_ops { | ||
134 | int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); | ||
135 | }; | ||
136 | |||
137 | struct hisi_pcie { | ||
138 | struct dw_pcie *pci; | ||
139 | struct regmap *subctrl; | ||
140 | u32 port_id; | ||
141 | const struct pcie_soc_ops *soc_ops; | ||
142 | }; | ||
143 | |||
144 | /* HipXX PCIe host only supports 32-bit config access */ | ||
145 | static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, | ||
146 | u32 *val) | ||
147 | { | ||
148 | u32 reg; | ||
149 | u32 reg_val; | ||
150 | void *walker = ®_val; | ||
151 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
152 | |||
153 | walker += (where & 0x3); | ||
154 | reg = where & ~0x3; | ||
155 | reg_val = dw_pcie_readl_dbi(pci, reg); | ||
156 | |||
157 | if (size == 1) | ||
158 | *val = *(u8 __force *) walker; | ||
159 | else if (size == 2) | ||
160 | *val = *(u16 __force *) walker; | ||
161 | else if (size == 4) | ||
162 | *val = reg_val; | ||
163 | else | ||
164 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
165 | |||
166 | return PCIBIOS_SUCCESSFUL; | ||
167 | } | ||
168 | |||
169 | /* HipXX PCIe host only supports 32-bit config access */ | ||
170 | static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, | ||
171 | u32 val) | ||
172 | { | ||
173 | u32 reg_val; | ||
174 | u32 reg; | ||
175 | void *walker = ®_val; | ||
176 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
177 | |||
178 | walker += (where & 0x3); | ||
179 | reg = where & ~0x3; | ||
180 | if (size == 4) | ||
181 | dw_pcie_writel_dbi(pci, reg, val); | ||
182 | else if (size == 2) { | ||
183 | reg_val = dw_pcie_readl_dbi(pci, reg); | ||
184 | *(u16 __force *) walker = val; | ||
185 | dw_pcie_writel_dbi(pci, reg, reg_val); | ||
186 | } else if (size == 1) { | ||
187 | reg_val = dw_pcie_readl_dbi(pci, reg); | ||
188 | *(u8 __force *) walker = val; | ||
189 | dw_pcie_writel_dbi(pci, reg, reg_val); | ||
190 | } else | ||
191 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
192 | |||
193 | return PCIBIOS_SUCCESSFUL; | ||
194 | } | ||
195 | |||
196 | static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) | ||
197 | { | ||
198 | u32 val; | ||
199 | |||
200 | regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + | ||
201 | 0x100 * hisi_pcie->port_id, &val); | ||
202 | |||
203 | return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); | ||
204 | } | ||
205 | |||
206 | static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) | ||
207 | { | ||
208 | struct dw_pcie *pci = hisi_pcie->pci; | ||
209 | u32 val; | ||
210 | |||
211 | val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); | ||
212 | |||
213 | return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); | ||
214 | } | ||
215 | |||
216 | static int hisi_pcie_link_up(struct dw_pcie *pci) | ||
217 | { | ||
218 | struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); | ||
219 | |||
220 | return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); | ||
221 | } | ||
222 | |||
/* Own-config accessors: HipXX DBI only supports 32-bit accesses. */
static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
	.rd_own_conf = hisi_pcie_cfg_read,
	.wr_own_conf = hisi_pcie_cfg_write,
};
227 | |||
228 | static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, | ||
229 | struct platform_device *pdev) | ||
230 | { | ||
231 | struct dw_pcie *pci = hisi_pcie->pci; | ||
232 | struct pcie_port *pp = &pci->pp; | ||
233 | struct device *dev = &pdev->dev; | ||
234 | int ret; | ||
235 | u32 port_id; | ||
236 | |||
237 | if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { | ||
238 | dev_err(dev, "failed to read port-id\n"); | ||
239 | return -EINVAL; | ||
240 | } | ||
241 | if (port_id > 3) { | ||
242 | dev_err(dev, "Invalid port-id: %d\n", port_id); | ||
243 | return -EINVAL; | ||
244 | } | ||
245 | hisi_pcie->port_id = port_id; | ||
246 | |||
247 | pp->ops = &hisi_pcie_host_ops; | ||
248 | |||
249 | ret = dw_pcie_host_init(pp); | ||
250 | if (ret) { | ||
251 | dev_err(dev, "failed to initialize host\n"); | ||
252 | return ret; | ||
253 | } | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
/* Core DWC callbacks: only link-up detection is SoC-specific here. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = hisi_pcie_link_up,
};
261 | |||
262 | static int hisi_pcie_probe(struct platform_device *pdev) | ||
263 | { | ||
264 | struct device *dev = &pdev->dev; | ||
265 | struct dw_pcie *pci; | ||
266 | struct hisi_pcie *hisi_pcie; | ||
267 | struct resource *reg; | ||
268 | int ret; | ||
269 | |||
270 | hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); | ||
271 | if (!hisi_pcie) | ||
272 | return -ENOMEM; | ||
273 | |||
274 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
275 | if (!pci) | ||
276 | return -ENOMEM; | ||
277 | |||
278 | pci->dev = dev; | ||
279 | pci->ops = &dw_pcie_ops; | ||
280 | |||
281 | hisi_pcie->pci = pci; | ||
282 | |||
283 | hisi_pcie->soc_ops = of_device_get_match_data(dev); | ||
284 | |||
285 | hisi_pcie->subctrl = | ||
286 | syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); | ||
287 | if (IS_ERR(hisi_pcie->subctrl)) { | ||
288 | dev_err(dev, "cannot get subctrl base\n"); | ||
289 | return PTR_ERR(hisi_pcie->subctrl); | ||
290 | } | ||
291 | |||
292 | reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); | ||
293 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); | ||
294 | if (IS_ERR(pci->dbi_base)) | ||
295 | return PTR_ERR(pci->dbi_base); | ||
296 | platform_set_drvdata(pdev, hisi_pcie); | ||
297 | |||
298 | ret = hisi_add_pcie_port(hisi_pcie, pdev); | ||
299 | if (ret) | ||
300 | return ret; | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | static struct pcie_soc_ops hip05_ops = { | ||
306 | &hisi_pcie_link_up_hip05 | ||
307 | }; | ||
308 | |||
309 | static struct pcie_soc_ops hip06_ops = { | ||
310 | &hisi_pcie_link_up_hip06 | ||
311 | }; | ||
312 | |||
/* DT match table: per-SoC link-up ops selected via .data. */
static const struct of_device_id hisi_pcie_of_match[] = {
	{
			.compatible = "hisilicon,hip05-pcie",
			.data	    = (void *) &hip05_ops,
	},
	{
			.compatible = "hisilicon,hip06-pcie",
			.data	    = (void *) &hip06_ops,
	},
	{},
};
324 | |||
/* Built-in only (no module unload path), hence suppress_bind_attrs. */
static struct platform_driver hisi_pcie_driver = {
	.probe  = hisi_pcie_probe,
	.driver = {
		   .name = "hisi-pcie",
		   .of_match_table = hisi_pcie_of_match,
		   .suppress_bind_attrs = true,
	},
};
builtin_platform_driver(hisi_pcie_driver);
334 | |||
335 | static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev) | ||
336 | { | ||
337 | struct device *dev = &pdev->dev; | ||
338 | struct pci_ecam_ops *ops; | ||
339 | |||
340 | ops = (struct pci_ecam_ops *)of_device_get_match_data(dev); | ||
341 | return pci_host_common_probe(pdev, ops); | ||
342 | } | ||
343 | |||
344 | static int hisi_pcie_platform_init(struct pci_config_window *cfg) | ||
345 | { | ||
346 | struct device *dev = cfg->parent; | ||
347 | struct platform_device *pdev = to_platform_device(dev); | ||
348 | struct resource *res; | ||
349 | void __iomem *reg_base; | ||
350 | |||
351 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
352 | if (!res) { | ||
353 | dev_err(dev, "missing \"reg[1]\"property\n"); | ||
354 | return -EINVAL; | ||
355 | } | ||
356 | |||
357 | reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); | ||
358 | if (!reg_base) | ||
359 | return -ENOMEM; | ||
360 | |||
361 | cfg->priv = reg_base; | ||
362 | return 0; | ||
363 | } | ||
364 | |||
/* Almost-ECAM ops for the DT path; shares accessors with the ACPI path. */
struct pci_ecam_ops hisi_pcie_platform_ops = {
	.bus_shift    = 20,	/* 1 MiB of config space per bus */
	.init         =  hisi_pcie_platform_init,
	.pci_ops      = {
		.map_bus    = hisi_pcie_map_bus,
		.read       = hisi_pcie_rd_conf,
		.write      = hisi_pcie_wr_conf,
	}
};
374 | |||
/* HiP06/HiP07 share the same almost-ECAM ops. */
static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
	{
		.compatible =  "hisilicon,hip06-pcie-ecam",
		.data	    = (void *) &hisi_pcie_platform_ops,
	},
	{
		.compatible =  "hisilicon,hip07-pcie-ecam",
		.data       = (void *) &hisi_pcie_platform_ops,
	},
	{},
};
386 | |||
/* Built-in only, like hisi_pcie_driver above. */
static struct platform_driver hisi_pcie_almost_ecam_driver = {
	.probe  = hisi_pcie_almost_ecam_probe,
	.driver = {
		   .name = "hisi-pcie-almost-ecam",
		   .of_match_table = hisi_pcie_almost_ecam_of_match,
		   .suppress_bind_attrs = true,
	},
};
builtin_platform_driver(hisi_pcie_almost_ecam_driver);
396 | |||
397 | #endif | ||
398 | #endif | ||
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c new file mode 100644 index 000000000000..3611d6ce9a92 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-histb.c | |||
@@ -0,0 +1,472 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for HiSilicon STB SoCs | ||
4 | * | ||
5 | * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com | ||
6 | * | ||
7 | * Authors: Ruqiang Ju <juruqiang@hisilicon.com> | ||
8 | * Jianguo Sun <sunjianguo1@huawei.com> | ||
9 | */ | ||
10 | |||
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/reset.h>
23 | |||
24 | #include "pcie-designware.h" | ||
25 | |||
26 | #define to_histb_pcie(x) dev_get_drvdata((x)->dev) | ||
27 | |||
28 | #define PCIE_SYS_CTRL0 0x0000 | ||
29 | #define PCIE_SYS_CTRL1 0x0004 | ||
30 | #define PCIE_SYS_CTRL7 0x001C | ||
31 | #define PCIE_SYS_CTRL13 0x0034 | ||
32 | #define PCIE_SYS_CTRL15 0x003C | ||
33 | #define PCIE_SYS_CTRL16 0x0040 | ||
34 | #define PCIE_SYS_CTRL17 0x0044 | ||
35 | |||
36 | #define PCIE_SYS_STAT0 0x0100 | ||
37 | #define PCIE_SYS_STAT4 0x0110 | ||
38 | |||
39 | #define PCIE_RDLH_LINK_UP BIT(5) | ||
40 | #define PCIE_XMLH_LINK_UP BIT(15) | ||
41 | #define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) | ||
42 | #define PCIE_APP_LTSSM_ENABLE BIT(11) | ||
43 | |||
44 | #define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28) | ||
45 | #define PCIE_WM_EP 0 | ||
46 | #define PCIE_WM_LEGACY BIT(1) | ||
47 | #define PCIE_WM_RC BIT(30) | ||
48 | |||
49 | #define PCIE_LTSSM_STATE_MASK GENMASK(5, 0) | ||
50 | #define PCIE_LTSSM_STATE_ACTIVE 0x11 | ||
51 | |||
/* Per-controller state for the HiSilicon STB PCIe host. */
struct histb_pcie {
	struct dw_pcie *pci;			/* DWC core state */
	/* clocks, named "aux"/"pipe"/"sys"/"bus" in DT (see probe) */
	struct clk *aux_clk;
	struct clk *pipe_clk;
	struct clk *sys_clk;
	struct clk *bus_clk;
	struct phy *phy;			/* optional PCIe PHY; NULL if absent */
	/* resets, named "soft"/"sys"/"bus" in DT */
	struct reset_control *soft_reset;
	struct reset_control *sys_reset;
	struct reset_control *bus_reset;
	void __iomem *ctrl;			/* vendor "control" register window */
	int reset_gpio;				/* "reset-gpios" device power control */
	struct regulator *vpcie;		/* optional slot supply; NULL if absent */
};
66 | |||
67 | static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg) | ||
68 | { | ||
69 | return readl(histb_pcie->ctrl + reg); | ||
70 | } | ||
71 | |||
72 | static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) | ||
73 | { | ||
74 | writel(val, histb_pcie->ctrl + reg); | ||
75 | } | ||
76 | |||
77 | static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) | ||
78 | { | ||
79 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
80 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
81 | u32 val; | ||
82 | |||
83 | val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); | ||
84 | if (enable) | ||
85 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
86 | else | ||
87 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
88 | histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); | ||
89 | } | ||
90 | |||
91 | static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) | ||
92 | { | ||
93 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
94 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
95 | u32 val; | ||
96 | |||
97 | val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1); | ||
98 | if (enable) | ||
99 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
100 | else | ||
101 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
102 | histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val); | ||
103 | } | ||
104 | |||
105 | static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, | ||
106 | u32 reg, size_t size) | ||
107 | { | ||
108 | u32 val; | ||
109 | |||
110 | histb_pcie_dbi_r_mode(&pci->pp, true); | ||
111 | dw_pcie_read(base + reg, size, &val); | ||
112 | histb_pcie_dbi_r_mode(&pci->pp, false); | ||
113 | |||
114 | return val; | ||
115 | } | ||
116 | |||
117 | static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, | ||
118 | u32 reg, size_t size, u32 val) | ||
119 | { | ||
120 | histb_pcie_dbi_w_mode(&pci->pp, true); | ||
121 | dw_pcie_write(base + reg, size, val); | ||
122 | histb_pcie_dbi_w_mode(&pci->pp, false); | ||
123 | } | ||
124 | |||
125 | static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where, | ||
126 | int size, u32 *val) | ||
127 | { | ||
128 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
129 | int ret; | ||
130 | |||
131 | histb_pcie_dbi_r_mode(pp, true); | ||
132 | ret = dw_pcie_read(pci->dbi_base + where, size, val); | ||
133 | histb_pcie_dbi_r_mode(pp, false); | ||
134 | |||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where, | ||
139 | int size, u32 val) | ||
140 | { | ||
141 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
142 | int ret; | ||
143 | |||
144 | histb_pcie_dbi_w_mode(pp, true); | ||
145 | ret = dw_pcie_write(pci->dbi_base + where, size, val); | ||
146 | histb_pcie_dbi_w_mode(pp, false); | ||
147 | |||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | static int histb_pcie_link_up(struct dw_pcie *pci) | ||
152 | { | ||
153 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
154 | u32 regval; | ||
155 | u32 status; | ||
156 | |||
157 | regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0); | ||
158 | status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4); | ||
159 | status &= PCIE_LTSSM_STATE_MASK; | ||
160 | if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && | ||
161 | (status == PCIE_LTSSM_STATE_ACTIVE)) | ||
162 | return 1; | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
/*
 * Bring the link up: select RC mode, program the root complex, then
 * release the LTSSM and wait for link training to complete.  The
 * ordering of these register writes is part of the bring-up sequence;
 * do not reorder.
 */
static int histb_pcie_establish_link(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct histb_pcie *hipcie = to_histb_pcie(pci);
	u32 regval;

	if (dw_pcie_link_up(pci)) {
		dev_info(pci->dev, "Link already up\n");
		return 0;
	}

	/* PCIe RC work mode */
	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
	regval &= ~PCIE_DEVICE_TYPE_MASK;
	regval |= PCIE_WM_RC;
	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);

	/* setup root complex */
	dw_pcie_setup_rc(pp);

	/* assert LTSSM enable */
	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
	regval |= PCIE_APP_LTSSM_ENABLE;
	histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);

	/* blocks until the link trains or the wait times out */
	return dw_pcie_wait_for_link(pci);
}
194 | |||
/*
 * DWC host_init hook: bring the link up and initialize MSI.
 *
 * NOTE(review): the establish_link() return value is ignored, so host
 * init succeeds even if link training fails — presumably best-effort so
 * an empty slot doesn't abort probe; confirm this is intentional.
 */
static int histb_pcie_host_init(struct pcie_port *pp)
{
	histb_pcie_establish_link(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}
204 | |||
205 | static struct dw_pcie_host_ops histb_pcie_host_ops = { | ||
206 | .rd_own_conf = histb_pcie_rd_own_conf, | ||
207 | .wr_own_conf = histb_pcie_wr_own_conf, | ||
208 | .host_init = histb_pcie_host_init, | ||
209 | }; | ||
210 | |||
/*
 * Power the port down: assert all resets, gate the clocks, release the
 * reset GPIO and cut the optional slot supply — the reverse of
 * histb_pcie_host_enable().
 */
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
{
	reset_control_assert(hipcie->soft_reset);
	reset_control_assert(hipcie->sys_reset);
	reset_control_assert(hipcie->bus_reset);

	clk_disable_unprepare(hipcie->aux_clk);
	clk_disable_unprepare(hipcie->pipe_clk);
	clk_disable_unprepare(hipcie->sys_clk);
	clk_disable_unprepare(hipcie->bus_clk);

	if (gpio_is_valid(hipcie->reset_gpio))
		gpio_set_value_cansleep(hipcie->reset_gpio, 0);

	if (hipcie->vpcie)
		regulator_disable(hipcie->vpcie);
}
228 | |||
/*
 * Power the port up: enable the optional slot supply, drive the reset
 * GPIO, ungate the clocks in bus -> sys -> pipe -> aux order, then
 * pulse the soft/sys/bus resets.  On failure everything acquired so far
 * is released via the goto-cleanup chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int histb_pcie_host_enable(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct histb_pcie *hipcie = to_histb_pcie(pci);
	struct device *dev = pci->dev;
	int ret;

	/* power on PCIe device if have */
	if (hipcie->vpcie) {
		ret = regulator_enable(hipcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	if (gpio_is_valid(hipcie->reset_gpio))
		gpio_set_value_cansleep(hipcie->reset_gpio, 1);

	ret = clk_prepare_enable(hipcie->bus_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable bus clk\n");
		goto err_bus_clk;
	}

	ret = clk_prepare_enable(hipcie->sys_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable sys clk\n");
		goto err_sys_clk;
	}

	ret = clk_prepare_enable(hipcie->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clk\n");
		goto err_pipe_clk;
	}

	ret = clk_prepare_enable(hipcie->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clk\n");
		goto err_aux_clk;
	}

	/* assert-then-deassert pulses each reset line */
	reset_control_assert(hipcie->soft_reset);
	reset_control_deassert(hipcie->soft_reset);

	reset_control_assert(hipcie->sys_reset);
	reset_control_deassert(hipcie->sys_reset);

	reset_control_assert(hipcie->bus_reset);
	reset_control_deassert(hipcie->bus_reset);

	return 0;

err_aux_clk:
	clk_disable_unprepare(hipcie->pipe_clk);
err_pipe_clk:
	clk_disable_unprepare(hipcie->sys_clk);
err_sys_clk:
	clk_disable_unprepare(hipcie->bus_clk);
err_bus_clk:
	if (hipcie->vpcie)
		regulator_disable(hipcie->vpcie);

	return ret;
}
295 | |||
/* DWC core callbacks: windowed DBI accessors plus link-up detection. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.read_dbi = histb_pcie_read_dbi,
	.write_dbi = histb_pcie_write_dbi,
	.link_up = histb_pcie_link_up,
};
301 | |||
/*
 * Probe: map the "control" and "rc-dbi" windows, acquire the optional
 * vpcie supply and reset GPIO, the aux/pipe/sys/bus clocks, the
 * soft/sys/bus resets, the MSI IRQ and the optional PHY, then power the
 * port up and register the DWC host.
 */
static int histb_pcie_probe(struct platform_device *pdev)
{
	struct histb_pcie *hipcie;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	enum of_gpio_flags of_flags;
	unsigned long flag = GPIOF_DIR_OUT;
	int ret;

	hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
	if (!hipcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	hipcie->pci = pci;
	pp = &pci->pp;
	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	/* vendor control registers (DBI windowing, LTSSM, status) */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
	hipcie->ctrl = devm_ioremap_resource(dev, res);
	if (IS_ERR(hipcie->ctrl)) {
		dev_err(dev, "cannot get control reg base\n");
		return PTR_ERR(hipcie->ctrl);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base)) {
		dev_err(dev, "cannot get rc-dbi base\n");
		return PTR_ERR(pci->dbi_base);
	}

	/* optional slot supply: absence is tolerated, deferral is not */
	hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
	if (IS_ERR(hipcie->vpcie)) {
		if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		hipcie->vpcie = NULL;
	}

	/* optional device-power GPIO; polarity taken from the DT flags */
	hipcie->reset_gpio = of_get_named_gpio_flags(np,
				"reset-gpios", 0, &of_flags);
	if (of_flags & OF_GPIO_ACTIVE_LOW)
		flag |= GPIOF_ACTIVE_LOW;
	if (gpio_is_valid(hipcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
					    flag, "PCIe device power control");
		if (ret) {
			dev_err(dev, "unable to request gpio\n");
			return ret;
		}
	}

	hipcie->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(hipcie->aux_clk)) {
		dev_err(dev, "Failed to get PCIe aux clk\n");
		return PTR_ERR(hipcie->aux_clk);
	}

	hipcie->pipe_clk = devm_clk_get(dev, "pipe");
	if (IS_ERR(hipcie->pipe_clk)) {
		dev_err(dev, "Failed to get PCIe pipe clk\n");
		return PTR_ERR(hipcie->pipe_clk);
	}

	hipcie->sys_clk = devm_clk_get(dev, "sys");
	if (IS_ERR(hipcie->sys_clk)) {
		dev_err(dev, "Failed to get PCIEe sys clk\n");
		return PTR_ERR(hipcie->sys_clk);
	}

	hipcie->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(hipcie->bus_clk)) {
		dev_err(dev, "Failed to get PCIe bus clk\n");
		return PTR_ERR(hipcie->bus_clk);
	}

	hipcie->soft_reset = devm_reset_control_get(dev, "soft");
	if (IS_ERR(hipcie->soft_reset)) {
		dev_err(dev, "couldn't get soft reset\n");
		return PTR_ERR(hipcie->soft_reset);
	}

	hipcie->sys_reset = devm_reset_control_get(dev, "sys");
	if (IS_ERR(hipcie->sys_reset)) {
		dev_err(dev, "couldn't get sys reset\n");
		return PTR_ERR(hipcie->sys_reset);
	}

	hipcie->bus_reset = devm_reset_control_get(dev, "bus");
	if (IS_ERR(hipcie->bus_reset)) {
		dev_err(dev, "couldn't get bus reset\n");
		return PTR_ERR(hipcie->bus_reset);
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0) {
			dev_err(dev, "Failed to get MSI IRQ\n");
			return pp->msi_irq;
		}
	}

	hipcie->phy = devm_phy_get(dev, "phy");
	if (IS_ERR(hipcie->phy)) {
		dev_info(dev, "no pcie-phy found\n");
		hipcie->phy = NULL;
		/* fall through here!
		 * if no pcie-phy found, phy init
		 * should be done under boot!
		 */
	} else {
		phy_init(hipcie->phy);
	}

	pp->root_bus_nr = -1;
	pp->ops = &histb_pcie_host_ops;

	/* drvdata must be set before host init: the ops read it back */
	platform_set_drvdata(pdev, hipcie);

	ret = histb_pcie_host_enable(pp);
	if (ret) {
		dev_err(dev, "failed to enable host\n");
		return ret;
	}

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
442 | |||
443 | static int histb_pcie_remove(struct platform_device *pdev) | ||
444 | { | ||
445 | struct histb_pcie *hipcie = platform_get_drvdata(pdev); | ||
446 | |||
447 | histb_pcie_host_disable(hipcie); | ||
448 | |||
449 | if (hipcie->phy) | ||
450 | phy_exit(hipcie->phy); | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
/* DT match table; exported for module autoloading. */
static const struct of_device_id histb_pcie_of_match[] = {
	{ .compatible = "hisilicon,hi3798cv200-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
460 | |||
/* Loadable platform driver (unlike the built-in hisi server drivers). */
static struct platform_driver histb_pcie_platform_driver = {
	.probe	= histb_pcie_probe,
	.remove	= histb_pcie_remove,
	.driver = {
		.name = "histb-pcie",
		.of_match_table = histb_pcie_of_match,
	},
};
module_platform_driver(histb_pcie_platform_driver);
470 | |||
471 | MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver"); | ||
472 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c new file mode 100644 index 000000000000..d2970a009eb5 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-kirin.c | |||
@@ -0,0 +1,515 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for Kirin Phone SoCs | ||
4 | * | ||
5 | * Copyright (C) 2017 Hilisicon Electronics Co., Ltd. | ||
6 | * http://www.huawei.com | ||
7 | * | ||
8 | * Author: Xiaowei Song <songxiaowei@huawei.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/compiler.h> | ||
12 | #include <linux/clk.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/gpio.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/mfd/syscon.h> | ||
18 | #include <linux/of_address.h> | ||
19 | #include <linux/of_gpio.h> | ||
20 | #include <linux/of_pci.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/pci_regs.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/regmap.h> | ||
25 | #include <linux/resource.h> | ||
26 | #include <linux/types.h> | ||
27 | #include "pcie-designware.h" | ||
28 | |||
29 | #define to_kirin_pcie(x) dev_get_drvdata((x)->dev) | ||
30 | |||
31 | #define REF_CLK_FREQ 100000000 | ||
32 | |||
33 | /* PCIe ELBI registers */ | ||
34 | #define SOC_PCIECTRL_CTRL0_ADDR 0x000 | ||
35 | #define SOC_PCIECTRL_CTRL1_ADDR 0x004 | ||
36 | #define SOC_PCIEPHY_CTRL2_ADDR 0x008 | ||
37 | #define SOC_PCIEPHY_CTRL3_ADDR 0x00c | ||
38 | #define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) | ||
39 | |||
40 | /* info located in APB */ | ||
41 | #define PCIE_APP_LTSSM_ENABLE 0x01c | ||
42 | #define PCIE_APB_PHY_CTRL0 0x0 | ||
43 | #define PCIE_APB_PHY_CTRL1 0x4 | ||
44 | #define PCIE_APB_PHY_STATUS0 0x400 | ||
45 | #define PCIE_LINKUP_ENABLE (0x8020) | ||
46 | #define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) | ||
47 | #define PIPE_CLK_STABLE (0x1 << 19) | ||
48 | #define PHY_REF_PAD_BIT (0x1 << 8) | ||
49 | #define PHY_PWR_DOWN_BIT (0x1 << 22) | ||
50 | #define PHY_RST_ACK_BIT (0x1 << 16) | ||
51 | |||
52 | /* info located in sysctrl */ | ||
53 | #define SCTRL_PCIE_CMOS_OFFSET 0x60 | ||
54 | #define SCTRL_PCIE_CMOS_BIT 0x10 | ||
55 | #define SCTRL_PCIE_ISO_OFFSET 0x44 | ||
56 | #define SCTRL_PCIE_ISO_BIT 0x30 | ||
57 | #define SCTRL_PCIE_HPCLK_OFFSET 0x190 | ||
58 | #define SCTRL_PCIE_HPCLK_BIT 0x184000 | ||
59 | #define SCTRL_PCIE_OE_OFFSET 0x14a | ||
60 | #define PCIE_DEBOUNCE_PARAM 0xF0F400 | ||
61 | #define PCIE_OE_BYPASS (0x3 << 28) | ||
62 | |||
63 | /* peri_crg ctrl */ | ||
64 | #define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 | ||
65 | #define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 | ||
66 | |||
67 | /* Time for delay */ | ||
68 | #define REF_2_PERST_MIN 20000 | ||
69 | #define REF_2_PERST_MAX 25000 | ||
70 | #define PERST_2_ACCESS_MIN 10000 | ||
71 | #define PERST_2_ACCESS_MAX 12000 | ||
72 | #define LINK_WAIT_MIN 900 | ||
73 | #define LINK_WAIT_MAX 1000 | ||
74 | #define PIPE_CLK_WAIT_MIN 550 | ||
75 | #define PIPE_CLK_WAIT_MAX 600 | ||
76 | #define TIME_CMOS_MIN 100 | ||
77 | #define TIME_CMOS_MAX 105 | ||
78 | #define TIME_PHY_PD_MIN 10 | ||
79 | #define TIME_PHY_PD_MAX 11 | ||
80 | |||
/* Per-controller state for the Kirin (Hi3660) PCIe root complex. */
struct kirin_pcie {
	struct dw_pcie	*pci;		/* DesignWare core handle */
	void __iomem	*apb_base;	/* PCIe controller APB registers */
	void __iomem	*phy_base;	/* PCIe PHY registers */
	struct regmap	*crgctrl;	/* clock/reset syscon (hi3660-crgctrl) */
	struct regmap	*sysctrl;	/* system-control syscon (hi3660-sctrl) */
	struct clk	*apb_sys_clk;
	struct clk	*apb_phy_clk;
	struct clk	*phy_ref_clk;
	struct clk	*pcie_aclk;
	struct clk	*pcie_aux_clk;
	int		gpio_id_reset;	/* PERST# GPIO number from "reset-gpios" */
};
94 | |||
95 | /* Registers in PCIeCTRL */ | ||
96 | static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, | ||
97 | u32 val, u32 reg) | ||
98 | { | ||
99 | writel(val, kirin_pcie->apb_base + reg); | ||
100 | } | ||
101 | |||
102 | static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) | ||
103 | { | ||
104 | return readl(kirin_pcie->apb_base + reg); | ||
105 | } | ||
106 | |||
107 | /* Registers in PCIePHY */ | ||
108 | static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, | ||
109 | u32 val, u32 reg) | ||
110 | { | ||
111 | writel(val, kirin_pcie->phy_base + reg); | ||
112 | } | ||
113 | |||
114 | static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) | ||
115 | { | ||
116 | return readl(kirin_pcie->phy_base + reg); | ||
117 | } | ||
118 | |||
119 | static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, | ||
120 | struct platform_device *pdev) | ||
121 | { | ||
122 | struct device *dev = &pdev->dev; | ||
123 | |||
124 | kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); | ||
125 | if (IS_ERR(kirin_pcie->phy_ref_clk)) | ||
126 | return PTR_ERR(kirin_pcie->phy_ref_clk); | ||
127 | |||
128 | kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); | ||
129 | if (IS_ERR(kirin_pcie->pcie_aux_clk)) | ||
130 | return PTR_ERR(kirin_pcie->pcie_aux_clk); | ||
131 | |||
132 | kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); | ||
133 | if (IS_ERR(kirin_pcie->apb_phy_clk)) | ||
134 | return PTR_ERR(kirin_pcie->apb_phy_clk); | ||
135 | |||
136 | kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); | ||
137 | if (IS_ERR(kirin_pcie->apb_sys_clk)) | ||
138 | return PTR_ERR(kirin_pcie->apb_sys_clk); | ||
139 | |||
140 | kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); | ||
141 | if (IS_ERR(kirin_pcie->pcie_aclk)) | ||
142 | return PTR_ERR(kirin_pcie->pcie_aclk); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
/*
 * Map the "apb", "phy" and "dbi" MMIO regions and look up the two
 * syscon regmaps (clock/reset control and system control) used for
 * power sequencing.
 *
 * Returns 0 on success or a negative errno from the first failing
 * mapping/lookup.
 */
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
				    struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *apb;
	struct resource *phy;
	struct resource *dbi;

	apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
	kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
	if (IS_ERR(kirin_pcie->apb_base))
		return PTR_ERR(kirin_pcie->apb_base);

	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
	kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
	if (IS_ERR(kirin_pcie->phy_base))
		return PTR_ERR(kirin_pcie->phy_base);

	/* DBI region belongs to the DesignWare core, not this wrapper. */
	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
	if (IS_ERR(kirin_pcie->pci->dbi_base))
		return PTR_ERR(kirin_pcie->pci->dbi_base);

	kirin_pcie->crgctrl =
		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
	if (IS_ERR(kirin_pcie->crgctrl))
		return PTR_ERR(kirin_pcie->crgctrl);

	kirin_pcie->sysctrl =
		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
	if (IS_ERR(kirin_pcie->sysctrl))
		return PTR_ERR(kirin_pcie->sysctrl);

	return 0;
}
182 | |||
/*
 * Bring the PCIe PHY out of reset/power-down and wait for its PIPE
 * clock to stabilize. The register sequence (ref pad, power-down,
 * reset-ack, then the status poll) is order-sensitive hardware init.
 *
 * Returns 0 on success, -EINVAL if the PIPE clock does not stabilize.
 */
static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
{
	struct device *dev = kirin_pcie->pci->dev;
	u32 reg_val;

	/* Release the reference-clock pad. */
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_REF_PAD_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

	/* Power the PHY up and give it time to settle. */
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
	reg_val &= ~PHY_PWR_DOWN_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);

	/* Release the PHY reset acknowledge. */
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_RST_ACK_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

	/* A set PIPE_CLK_STABLE bit here signals failure (see dev_err). */
	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
	if (reg_val & PIPE_CLK_STABLE) {
		dev_err(dev, "PIPE clk is not stable\n");
		return -EINVAL;
	}

	return 0;
}
210 | |||
/*
 * Configure the output-enable control in sysctrl: program the debounce
 * parameter and clear the OE bypass bits so the pads are driven.
 */
static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
{
	u32 val;

	regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
	val |= PCIE_DEBOUNCE_PARAM;
	val &= ~PCIE_OE_BYPASS;
	regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}
220 | |||
/*
 * Enable (@enable == true) or disable all controller clocks.
 *
 * The label chain does double duty: on an enable-path failure execution
 * enters partway down and unwinds only the clocks already enabled; on
 * disable (@enable == false) it jumps to close_clk and deliberately
 * falls through every label, disabling all five clocks in reverse
 * order. Do not reorder the labels.
 *
 * Returns 0 on success or the first failing clk API error code.
 */
static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
{
	int ret = 0;

	if (!enable)
		goto close_clk;

	/* The PHY reference clock must run at exactly 100 MHz. */
	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
	if (ret)
		goto apb_sys_fail;

	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
	if (ret)
		goto apb_phy_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
	if (ret)
		goto aclk_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
	if (ret)
		goto aux_clk_fail;

	return 0;

close_clk:
	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
aux_clk_fail:
	clk_disable_unprepare(kirin_pcie->pcie_aclk);
aclk_fail:
	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
apb_phy_fail:
	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
apb_sys_fail:
	clk_disable_unprepare(kirin_pcie->phy_ref_clk);

	return ret;
}
267 | |||
268 | static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) | ||
269 | { | ||
270 | int ret; | ||
271 | |||
272 | /* Power supply for Host */ | ||
273 | regmap_write(kirin_pcie->sysctrl, | ||
274 | SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); | ||
275 | usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); | ||
276 | kirin_pcie_oe_enable(kirin_pcie); | ||
277 | |||
278 | ret = kirin_pcie_clk_ctrl(kirin_pcie, true); | ||
279 | if (ret) | ||
280 | return ret; | ||
281 | |||
282 | /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ | ||
283 | regmap_write(kirin_pcie->sysctrl, | ||
284 | SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); | ||
285 | regmap_write(kirin_pcie->crgctrl, | ||
286 | CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); | ||
287 | regmap_write(kirin_pcie->sysctrl, | ||
288 | SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); | ||
289 | |||
290 | ret = kirin_pcie_phy_init(kirin_pcie); | ||
291 | if (ret) | ||
292 | goto close_clk; | ||
293 | |||
294 | /* perst assert Endpoint */ | ||
295 | if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { | ||
296 | usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); | ||
297 | ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); | ||
298 | if (ret) | ||
299 | goto close_clk; | ||
300 | usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | close_clk: | ||
306 | kirin_pcie_clk_ctrl(kirin_pcie, false); | ||
307 | return ret; | ||
308 | } | ||
309 | |||
310 | static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, | ||
311 | bool on) | ||
312 | { | ||
313 | u32 val; | ||
314 | |||
315 | val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); | ||
316 | if (on) | ||
317 | val = val | PCIE_ELBI_SLV_DBI_ENABLE; | ||
318 | else | ||
319 | val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
320 | |||
321 | kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); | ||
322 | } | ||
323 | |||
324 | static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, | ||
325 | bool on) | ||
326 | { | ||
327 | u32 val; | ||
328 | |||
329 | val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); | ||
330 | if (on) | ||
331 | val = val | PCIE_ELBI_SLV_DBI_ENABLE; | ||
332 | else | ||
333 | val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
334 | |||
335 | kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); | ||
336 | } | ||
337 | |||
/*
 * Read the root port's own config space: DBI accesses must be bracketed
 * by enabling/disabling the read sideband mode.
 */
static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
				  int where, int size, u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	int ret;

	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
	ret = dw_pcie_read(pci->dbi_base + where, size, val);
	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

	return ret;
}
351 | |||
/*
 * Write the root port's own config space: DBI accesses must be
 * bracketed by enabling/disabling the write sideband mode.
 */
static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
				  int where, int size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	int ret;

	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
	ret = dw_pcie_write(pci->dbi_base + where, size, val);
	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);

	return ret;
}
365 | |||
/*
 * dw_pcie_ops.read_dbi callback: wrap the core DBI read with the read
 * sideband mode. The dw_pcie_read() status is discarded by design of
 * this callback's u32 return contract.
 */
static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
			       u32 reg, size_t size)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	u32 ret;

	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
	dw_pcie_read(base + reg, size, &ret);
	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

	return ret;
}
378 | |||
/*
 * dw_pcie_ops.write_dbi callback: wrap the core DBI write with the
 * write sideband mode.
 */
static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
				 u32 reg, size_t size, u32 val)
{
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);

	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
	dw_pcie_write(base + reg, size, val);
	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
388 | |||
389 | static int kirin_pcie_link_up(struct dw_pcie *pci) | ||
390 | { | ||
391 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
392 | u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); | ||
393 | |||
394 | if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) | ||
395 | return 1; | ||
396 | |||
397 | return 0; | ||
398 | } | ||
399 | |||
/*
 * Program the root complex and start link training, then poll for
 * link-up: 1000 iterations of ~1 ms sleeps, i.e. roughly a one-second
 * timeout.
 *
 * Returns 0 when the link is (or comes) up, -EINVAL on timeout.
 */
static int kirin_pcie_establish_link(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
	struct device *dev = kirin_pcie->pci->dev;
	int count = 0;

	if (kirin_pcie_link_up(pci))
		return 0;

	dw_pcie_setup_rc(pp);

	/* assert LTSSM enable */
	kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
			      PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	while (!kirin_pcie_link_up(pci)) {
		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
		count++;
		if (count == 1000) {
			dev_err(dev, "Link Fail\n");
			return -EINVAL;
		}
	}

	return 0;
}
428 | |||
/*
 * dw_pcie_host_ops.host_init callback: attempt to bring the link up.
 *
 * NOTE(review): the kirin_pcie_establish_link() result is discarded, so
 * probe reports success even if no endpoint is attached and the link
 * never trains. That may be intentional (hot-pluggable/absent EP);
 * confirm before propagating the error.
 */
static int kirin_pcie_host_init(struct pcie_port *pp)
{
	kirin_pcie_establish_link(pp);

	return 0;
}
435 | |||
436 | static struct dw_pcie_ops kirin_dw_pcie_ops = { | ||
437 | .read_dbi = kirin_pcie_read_dbi, | ||
438 | .write_dbi = kirin_pcie_write_dbi, | ||
439 | .link_up = kirin_pcie_link_up, | ||
440 | }; | ||
441 | |||
/* Host-layer callbacks; own-config accesses need the DBI sideband. */
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
	.rd_own_conf = kirin_pcie_rd_own_conf,
	.wr_own_conf = kirin_pcie_wr_own_conf,
	.host_init = kirin_pcie_host_init,
};
447 | |||
448 | static int __init kirin_add_pcie_port(struct dw_pcie *pci, | ||
449 | struct platform_device *pdev) | ||
450 | { | ||
451 | pci->pp.ops = &kirin_pcie_host_ops; | ||
452 | |||
453 | return dw_pcie_host_init(&pci->pp); | ||
454 | } | ||
455 | |||
456 | static int kirin_pcie_probe(struct platform_device *pdev) | ||
457 | { | ||
458 | struct device *dev = &pdev->dev; | ||
459 | struct kirin_pcie *kirin_pcie; | ||
460 | struct dw_pcie *pci; | ||
461 | int ret; | ||
462 | |||
463 | if (!dev->of_node) { | ||
464 | dev_err(dev, "NULL node\n"); | ||
465 | return -EINVAL; | ||
466 | } | ||
467 | |||
468 | kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); | ||
469 | if (!kirin_pcie) | ||
470 | return -ENOMEM; | ||
471 | |||
472 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
473 | if (!pci) | ||
474 | return -ENOMEM; | ||
475 | |||
476 | pci->dev = dev; | ||
477 | pci->ops = &kirin_dw_pcie_ops; | ||
478 | kirin_pcie->pci = pci; | ||
479 | |||
480 | ret = kirin_pcie_get_clk(kirin_pcie, pdev); | ||
481 | if (ret) | ||
482 | return ret; | ||
483 | |||
484 | ret = kirin_pcie_get_resource(kirin_pcie, pdev); | ||
485 | if (ret) | ||
486 | return ret; | ||
487 | |||
488 | kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, | ||
489 | "reset-gpios", 0); | ||
490 | if (kirin_pcie->gpio_id_reset < 0) | ||
491 | return -ENODEV; | ||
492 | |||
493 | ret = kirin_pcie_power_on(kirin_pcie); | ||
494 | if (ret) | ||
495 | return ret; | ||
496 | |||
497 | platform_set_drvdata(pdev, kirin_pcie); | ||
498 | |||
499 | return kirin_add_pcie_port(pci, pdev); | ||
500 | } | ||
501 | |||
/* DT match table: Kirin 960 is the only supported SoC. */
static const struct of_device_id kirin_pcie_match[] = {
	{ .compatible = "hisilicon,kirin960-pcie" },
	{},
};
506 | |||
/*
 * Built-in platform driver: no remove callback, and manual unbind via
 * sysfs is suppressed since the controller cannot be torn down.
 */
static struct platform_driver kirin_pcie_driver = {
	.probe			= kirin_pcie_probe,
	.driver			= {
		.name			= "kirin-pcie",
		.of_match_table = kirin_pcie_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(kirin_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c new file mode 100644 index 000000000000..a1d0198081a6 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom.c | |||
@@ -0,0 +1,1299 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Qualcomm PCIe root complex driver | ||
4 | * | ||
5 | * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. | ||
6 | * Copyright 2015 Linaro Limited. | ||
7 | * | ||
8 | * Author: Stanimir Varbanov <svarbanov@mm-sol.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/gpio/consumer.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/iopoll.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/of_device.h> | ||
20 | #include <linux/of_gpio.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/pm_runtime.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/phy/phy.h> | ||
25 | #include <linux/regulator/consumer.h> | ||
26 | #include <linux/reset.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/types.h> | ||
29 | |||
30 | #include "pcie-designware.h" | ||
31 | |||
32 | #define PCIE20_PARF_SYS_CTRL 0x00 | ||
33 | #define MST_WAKEUP_EN BIT(13) | ||
34 | #define SLV_WAKEUP_EN BIT(12) | ||
35 | #define MSTR_ACLK_CGC_DIS BIT(10) | ||
36 | #define SLV_ACLK_CGC_DIS BIT(9) | ||
37 | #define CORE_CLK_CGC_DIS BIT(6) | ||
38 | #define AUX_PWR_DET BIT(4) | ||
39 | #define L23_CLK_RMV_DIS BIT(2) | ||
40 | #define L1_CLK_RMV_DIS BIT(1) | ||
41 | |||
42 | #define PCIE20_COMMAND_STATUS 0x04 | ||
43 | #define CMD_BME_VAL 0x4 | ||
44 | #define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 | ||
45 | #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 | ||
46 | |||
47 | #define PCIE20_PARF_PHY_CTRL 0x40 | ||
48 | #define PCIE20_PARF_PHY_REFCLK 0x4C | ||
49 | #define PCIE20_PARF_DBI_BASE_ADDR 0x168 | ||
50 | #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C | ||
51 | #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 | ||
52 | #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 | ||
53 | #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 | ||
54 | #define PCIE20_PARF_LTSSM 0x1B0 | ||
55 | #define PCIE20_PARF_SID_OFFSET 0x234 | ||
56 | #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C | ||
57 | |||
58 | #define PCIE20_ELBI_SYS_CTRL 0x04 | ||
59 | #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) | ||
60 | |||
61 | #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 | ||
62 | #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 | ||
63 | #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 | ||
64 | #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c | ||
65 | #define CFG_BRIDGE_SB_INIT BIT(0) | ||
66 | |||
67 | #define PCIE20_CAP 0x70 | ||
68 | #define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) | ||
69 | #define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) | ||
70 | #define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) | ||
71 | #define PCIE_CAP_LINK1_VAL 0x2FD7F | ||
72 | |||
73 | #define PCIE20_PARF_Q2A_FLUSH 0x1AC | ||
74 | |||
75 | #define PCIE20_MISC_CONTROL_1_REG 0x8BC | ||
76 | #define DBI_RO_WR_EN 1 | ||
77 | |||
78 | #define PERST_DELAY_US 1000 | ||
79 | |||
80 | #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 | ||
81 | #define SLV_ADDR_SPACE_SZ 0x10000000 | ||
82 | |||
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
/* Clocks, resets and supplies for the v2.1.0 IP revision. */
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
95 | |||
/* Clocks, core reset and vdda supply for the v1.0.0 IP revision. */
struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
104 | |||
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
/* Clocks and supplies for the v2.3.2 IP revision (no resets needed). */
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
114 | |||
/* Clocks and the large reset set for the v2.4.0 IP revision. */
struct qcom_pcie_resources_2_4_0 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};
132 | |||
/* Clocks and reset array for the v2.3.3 IP revision. */
struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];	/* per-name resets, looked up in order */
};
141 | |||
/* Exactly one variant is active, selected by the matched qcom_pcie_ops. */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
};
149 | |||
struct qcom_pcie;

/* Per-IP-revision hooks, chosen via the OF match data. */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);	/* acquire clks/resets/supplies */
	int (*init)(struct qcom_pcie *pcie);		/* power-up/reset sequence */
	int (*post_init)(struct qcom_pcie *pcie);	/* optional, after PHY power-on */
	void (*deinit)(struct qcom_pcie *pcie);		/* undo init() */
	void (*post_deinit)(struct qcom_pcie *pcie);	/* optional, undo post_init() */
	void (*ltssm_enable)(struct qcom_pcie *pcie);	/* kick off link training */
};
160 | |||
/* Per-controller state for the Qualcomm PCIe root complex. */
struct qcom_pcie {
	struct dw_pcie *pci;		/* DesignWare core handle */
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;	/* variant-specific resources */
	struct phy *phy;
	struct gpio_desc *reset;	/* PERST# GPIO */
	const struct qcom_pcie_ops *ops;	/* revision-specific hooks */
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
172 | |||
/* Drive PERST# active (endpoint held in reset) and let it settle. */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
178 | |||
/* Release PERST# (endpoint out of reset) and let it settle. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
184 | |||
/*
 * Start link training (via the revision-specific ltssm_enable hook, if
 * any) and wait for link-up using the common DesignWare poll helper.
 *
 * Returns 0 if the link is already up or comes up; otherwise the
 * dw_pcie_wait_for_link() error.
 */
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}
198 | |||
199 | static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) | ||
200 | { | ||
201 | u32 val; | ||
202 | |||
203 | /* enable link training */ | ||
204 | val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
205 | val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; | ||
206 | writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
207 | } | ||
208 | |||
/*
 * Acquire all devm-managed resources for the v2.1.0 core: three
 * regulators (vdda, vdda_phy, vdda_refclk), three clocks and five
 * named reset lines. Returns 0 or the first failure's errno.
 */
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
255 | |||
256 | static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) | ||
257 | { | ||
258 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; | ||
259 | |||
260 | reset_control_assert(res->pci_reset); | ||
261 | reset_control_assert(res->axi_reset); | ||
262 | reset_control_assert(res->ahb_reset); | ||
263 | reset_control_assert(res->por_reset); | ||
264 | reset_control_assert(res->pci_reset); | ||
265 | clk_disable_unprepare(res->iface_clk); | ||
266 | clk_disable_unprepare(res->core_clk); | ||
267 | clk_disable_unprepare(res->phy_clk); | ||
268 | regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); | ||
269 | } | ||
270 | |||
271 | static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) | ||
272 | { | ||
273 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; | ||
274 | struct dw_pcie *pci = pcie->pci; | ||
275 | struct device *dev = pci->dev; | ||
276 | u32 val; | ||
277 | int ret; | ||
278 | |||
279 | ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); | ||
280 | if (ret < 0) { | ||
281 | dev_err(dev, "cannot enable regulators\n"); | ||
282 | return ret; | ||
283 | } | ||
284 | |||
285 | ret = reset_control_assert(res->ahb_reset); | ||
286 | if (ret) { | ||
287 | dev_err(dev, "cannot assert ahb reset\n"); | ||
288 | goto err_assert_ahb; | ||
289 | } | ||
290 | |||
291 | ret = clk_prepare_enable(res->iface_clk); | ||
292 | if (ret) { | ||
293 | dev_err(dev, "cannot prepare/enable iface clock\n"); | ||
294 | goto err_assert_ahb; | ||
295 | } | ||
296 | |||
297 | ret = clk_prepare_enable(res->phy_clk); | ||
298 | if (ret) { | ||
299 | dev_err(dev, "cannot prepare/enable phy clock\n"); | ||
300 | goto err_clk_phy; | ||
301 | } | ||
302 | |||
303 | ret = clk_prepare_enable(res->core_clk); | ||
304 | if (ret) { | ||
305 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
306 | goto err_clk_core; | ||
307 | } | ||
308 | |||
309 | ret = reset_control_deassert(res->ahb_reset); | ||
310 | if (ret) { | ||
311 | dev_err(dev, "cannot deassert ahb reset\n"); | ||
312 | goto err_deassert_ahb; | ||
313 | } | ||
314 | |||
315 | /* enable PCIe clocks and resets */ | ||
316 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
317 | val &= ~BIT(0); | ||
318 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
319 | |||
320 | /* enable external reference clock */ | ||
321 | val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); | ||
322 | val |= BIT(16); | ||
323 | writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); | ||
324 | |||
325 | ret = reset_control_deassert(res->phy_reset); | ||
326 | if (ret) { | ||
327 | dev_err(dev, "cannot deassert phy reset\n"); | ||
328 | return ret; | ||
329 | } | ||
330 | |||
331 | ret = reset_control_deassert(res->pci_reset); | ||
332 | if (ret) { | ||
333 | dev_err(dev, "cannot deassert pci reset\n"); | ||
334 | return ret; | ||
335 | } | ||
336 | |||
337 | ret = reset_control_deassert(res->por_reset); | ||
338 | if (ret) { | ||
339 | dev_err(dev, "cannot deassert por reset\n"); | ||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | ret = reset_control_deassert(res->axi_reset); | ||
344 | if (ret) { | ||
345 | dev_err(dev, "cannot deassert axi reset\n"); | ||
346 | return ret; | ||
347 | } | ||
348 | |||
349 | /* wait for clock acquisition */ | ||
350 | usleep_range(1000, 1500); | ||
351 | |||
352 | |||
353 | /* Set the Max TLP size to 2K, instead of using default of 4K */ | ||
354 | writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, | ||
355 | pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); | ||
356 | writel(CFG_BRIDGE_SB_INIT, | ||
357 | pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); | ||
358 | |||
359 | return 0; | ||
360 | |||
361 | err_deassert_ahb: | ||
362 | clk_disable_unprepare(res->core_clk); | ||
363 | err_clk_core: | ||
364 | clk_disable_unprepare(res->phy_clk); | ||
365 | err_clk_phy: | ||
366 | clk_disable_unprepare(res->iface_clk); | ||
367 | err_assert_ahb: | ||
368 | regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); | ||
369 | |||
370 | return ret; | ||
371 | } | ||
372 | |||
/*
 * Acquire devm-managed resources for the v1.0.0 core: the vdda
 * regulator, four clocks and the "core" reset line. Returns 0 or the
 * first failure's errno.
 */
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
402 | |||
/*
 * Power down the rev. 1.0.0 controller: assert the core reset, stop the
 * clocks (reverse of the enable order in qcom_pcie_init_1_0_0()), then
 * drop the vdda supply.
 */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
414 | |||
/*
 * Bring up the rev. 1.0.0 controller: release the core reset, enable the
 * aux/iface/bus clocks and the vdda supply, then program the PARF wrapper.
 * On any failure, everything enabled so far is unwound in reverse order.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		/*
		 * NOTE(review): BIT(31) in AXI_MSTR_WR_ADDR_HALT is set only
		 * when MSI is enabled — presumably it gates AXI master write
		 * address halting needed for MSI delivery; confirm against
		 * the PARF register documentation.
		 */
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
482 | |||
/*
 * Kick off link training on rev. 2.3.2-style IP (also reused by the
 * 2.4.0 and 2.3.3 ops tables) by setting the LTSSM enable bit in PARF.
 */
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}
492 | |||
/*
 * Acquire supplies and clocks for Qcom IP rev. 2.3.2 (Synopsys 4.21a;
 * bound to "qcom,pcie-msm8996").  The pipe clock is enabled separately
 * in post_init because it is sourced from the PHY.
 */
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
526 | |||
/*
 * Undo qcom_pcie_init_2_3_2(): stop the clocks in reverse enable order,
 * then drop the bulk regulators.  The pipe clock has its own hook
 * (qcom_pcie_post_deinit_2_3_2()).
 */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
538 | |||
/* Counterpart of qcom_pcie_post_init_2_3_2(): stop the PHY pipe clock. */
static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}
545 | |||
/*
 * Bring up the rev. 2.3.2 controller: enable supplies and clocks, then
 * program the PARF wrapper (PHY control, DBI base, clock-gating/halt
 * bits).  Failures unwind in reverse order via the goto chain.
 */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	/* NOTE(review): BIT(4) meaning not visible here — confirm vs PARF docs */
	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
619 | |||
620 | static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) | ||
621 | { | ||
622 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
623 | struct dw_pcie *pci = pcie->pci; | ||
624 | struct device *dev = pci->dev; | ||
625 | int ret; | ||
626 | |||
627 | ret = clk_prepare_enable(res->pipe_clk); | ||
628 | if (ret) { | ||
629 | dev_err(dev, "cannot prepare/enable pipe clock\n"); | ||
630 | return ret; | ||
631 | } | ||
632 | |||
633 | return 0; | ||
634 | } | ||
635 | |||
/*
 * Acquire clocks and the (many) reset lines for Qcom IP rev. 2.4.0
 * (Synopsys 4.20a; bound to "qcom,pcie-ipq4019").  All handles are
 * devm-managed; failures simply propagate.
 */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
								 "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
								"axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}
708 | |||
/*
 * Power down the rev. 2.4.0 controller: assert every reset line, then
 * stop the clocks.  Note axi_m_vmid/axi_s_xpu are acquired but never
 * toggled here or in init — they are left in their boot state.
 */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}
726 | |||
727 | static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) | ||
728 | { | ||
729 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; | ||
730 | struct dw_pcie *pci = pcie->pci; | ||
731 | struct device *dev = pci->dev; | ||
732 | u32 val; | ||
733 | int ret; | ||
734 | |||
735 | ret = reset_control_assert(res->axi_m_reset); | ||
736 | if (ret) { | ||
737 | dev_err(dev, "cannot assert axi master reset\n"); | ||
738 | return ret; | ||
739 | } | ||
740 | |||
741 | ret = reset_control_assert(res->axi_s_reset); | ||
742 | if (ret) { | ||
743 | dev_err(dev, "cannot assert axi slave reset\n"); | ||
744 | return ret; | ||
745 | } | ||
746 | |||
747 | usleep_range(10000, 12000); | ||
748 | |||
749 | ret = reset_control_assert(res->pipe_reset); | ||
750 | if (ret) { | ||
751 | dev_err(dev, "cannot assert pipe reset\n"); | ||
752 | return ret; | ||
753 | } | ||
754 | |||
755 | ret = reset_control_assert(res->pipe_sticky_reset); | ||
756 | if (ret) { | ||
757 | dev_err(dev, "cannot assert pipe sticky reset\n"); | ||
758 | return ret; | ||
759 | } | ||
760 | |||
761 | ret = reset_control_assert(res->phy_reset); | ||
762 | if (ret) { | ||
763 | dev_err(dev, "cannot assert phy reset\n"); | ||
764 | return ret; | ||
765 | } | ||
766 | |||
767 | ret = reset_control_assert(res->phy_ahb_reset); | ||
768 | if (ret) { | ||
769 | dev_err(dev, "cannot assert phy ahb reset\n"); | ||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | usleep_range(10000, 12000); | ||
774 | |||
775 | ret = reset_control_assert(res->axi_m_sticky_reset); | ||
776 | if (ret) { | ||
777 | dev_err(dev, "cannot assert axi master sticky reset\n"); | ||
778 | return ret; | ||
779 | } | ||
780 | |||
781 | ret = reset_control_assert(res->pwr_reset); | ||
782 | if (ret) { | ||
783 | dev_err(dev, "cannot assert power reset\n"); | ||
784 | return ret; | ||
785 | } | ||
786 | |||
787 | ret = reset_control_assert(res->ahb_reset); | ||
788 | if (ret) { | ||
789 | dev_err(dev, "cannot assert ahb reset\n"); | ||
790 | return ret; | ||
791 | } | ||
792 | |||
793 | usleep_range(10000, 12000); | ||
794 | |||
795 | ret = reset_control_deassert(res->phy_ahb_reset); | ||
796 | if (ret) { | ||
797 | dev_err(dev, "cannot deassert phy ahb reset\n"); | ||
798 | return ret; | ||
799 | } | ||
800 | |||
801 | ret = reset_control_deassert(res->phy_reset); | ||
802 | if (ret) { | ||
803 | dev_err(dev, "cannot deassert phy reset\n"); | ||
804 | goto err_rst_phy; | ||
805 | } | ||
806 | |||
807 | ret = reset_control_deassert(res->pipe_reset); | ||
808 | if (ret) { | ||
809 | dev_err(dev, "cannot deassert pipe reset\n"); | ||
810 | goto err_rst_pipe; | ||
811 | } | ||
812 | |||
813 | ret = reset_control_deassert(res->pipe_sticky_reset); | ||
814 | if (ret) { | ||
815 | dev_err(dev, "cannot deassert pipe sticky reset\n"); | ||
816 | goto err_rst_pipe_sticky; | ||
817 | } | ||
818 | |||
819 | usleep_range(10000, 12000); | ||
820 | |||
821 | ret = reset_control_deassert(res->axi_m_reset); | ||
822 | if (ret) { | ||
823 | dev_err(dev, "cannot deassert axi master reset\n"); | ||
824 | goto err_rst_axi_m; | ||
825 | } | ||
826 | |||
827 | ret = reset_control_deassert(res->axi_m_sticky_reset); | ||
828 | if (ret) { | ||
829 | dev_err(dev, "cannot deassert axi master sticky reset\n"); | ||
830 | goto err_rst_axi_m_sticky; | ||
831 | } | ||
832 | |||
833 | ret = reset_control_deassert(res->axi_s_reset); | ||
834 | if (ret) { | ||
835 | dev_err(dev, "cannot deassert axi slave reset\n"); | ||
836 | goto err_rst_axi_s; | ||
837 | } | ||
838 | |||
839 | ret = reset_control_deassert(res->pwr_reset); | ||
840 | if (ret) { | ||
841 | dev_err(dev, "cannot deassert power reset\n"); | ||
842 | goto err_rst_pwr; | ||
843 | } | ||
844 | |||
845 | ret = reset_control_deassert(res->ahb_reset); | ||
846 | if (ret) { | ||
847 | dev_err(dev, "cannot deassert ahb reset\n"); | ||
848 | goto err_rst_ahb; | ||
849 | } | ||
850 | |||
851 | usleep_range(10000, 12000); | ||
852 | |||
853 | ret = clk_prepare_enable(res->aux_clk); | ||
854 | if (ret) { | ||
855 | dev_err(dev, "cannot prepare/enable iface clock\n"); | ||
856 | goto err_clk_aux; | ||
857 | } | ||
858 | |||
859 | ret = clk_prepare_enable(res->master_clk); | ||
860 | if (ret) { | ||
861 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
862 | goto err_clk_axi_m; | ||
863 | } | ||
864 | |||
865 | ret = clk_prepare_enable(res->slave_clk); | ||
866 | if (ret) { | ||
867 | dev_err(dev, "cannot prepare/enable phy clock\n"); | ||
868 | goto err_clk_axi_s; | ||
869 | } | ||
870 | |||
871 | /* enable PCIe clocks and resets */ | ||
872 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
873 | val &= ~BIT(0); | ||
874 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
875 | |||
876 | /* change DBI base address */ | ||
877 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
878 | |||
879 | /* MAC PHY_POWERDOWN MUX DISABLE */ | ||
880 | val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
881 | val &= ~BIT(29); | ||
882 | writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
883 | |||
884 | val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
885 | val |= BIT(4); | ||
886 | writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
887 | |||
888 | val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
889 | val |= BIT(31); | ||
890 | writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
891 | |||
892 | return 0; | ||
893 | |||
894 | err_clk_axi_s: | ||
895 | clk_disable_unprepare(res->master_clk); | ||
896 | err_clk_axi_m: | ||
897 | clk_disable_unprepare(res->aux_clk); | ||
898 | err_clk_aux: | ||
899 | reset_control_assert(res->ahb_reset); | ||
900 | err_rst_ahb: | ||
901 | reset_control_assert(res->pwr_reset); | ||
902 | err_rst_pwr: | ||
903 | reset_control_assert(res->axi_s_reset); | ||
904 | err_rst_axi_s: | ||
905 | reset_control_assert(res->axi_m_sticky_reset); | ||
906 | err_rst_axi_m_sticky: | ||
907 | reset_control_assert(res->axi_m_reset); | ||
908 | err_rst_axi_m: | ||
909 | reset_control_assert(res->pipe_sticky_reset); | ||
910 | err_rst_pipe_sticky: | ||
911 | reset_control_assert(res->pipe_reset); | ||
912 | err_rst_pipe: | ||
913 | reset_control_assert(res->phy_reset); | ||
914 | err_rst_phy: | ||
915 | reset_control_assert(res->phy_ahb_reset); | ||
916 | return ret; | ||
917 | } | ||
918 | |||
919 | static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) | ||
920 | { | ||
921 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
922 | struct dw_pcie *pci = pcie->pci; | ||
923 | struct device *dev = pci->dev; | ||
924 | int i; | ||
925 | const char *rst_names[] = { "axi_m", "axi_s", "pipe", | ||
926 | "axi_m_sticky", "sticky", | ||
927 | "ahb", "sleep", }; | ||
928 | |||
929 | res->iface = devm_clk_get(dev, "iface"); | ||
930 | if (IS_ERR(res->iface)) | ||
931 | return PTR_ERR(res->iface); | ||
932 | |||
933 | res->axi_m_clk = devm_clk_get(dev, "axi_m"); | ||
934 | if (IS_ERR(res->axi_m_clk)) | ||
935 | return PTR_ERR(res->axi_m_clk); | ||
936 | |||
937 | res->axi_s_clk = devm_clk_get(dev, "axi_s"); | ||
938 | if (IS_ERR(res->axi_s_clk)) | ||
939 | return PTR_ERR(res->axi_s_clk); | ||
940 | |||
941 | res->ahb_clk = devm_clk_get(dev, "ahb"); | ||
942 | if (IS_ERR(res->ahb_clk)) | ||
943 | return PTR_ERR(res->ahb_clk); | ||
944 | |||
945 | res->aux_clk = devm_clk_get(dev, "aux"); | ||
946 | if (IS_ERR(res->aux_clk)) | ||
947 | return PTR_ERR(res->aux_clk); | ||
948 | |||
949 | for (i = 0; i < ARRAY_SIZE(rst_names); i++) { | ||
950 | res->rst[i] = devm_reset_control_get(dev, rst_names[i]); | ||
951 | if (IS_ERR(res->rst[i])) | ||
952 | return PTR_ERR(res->rst[i]); | ||
953 | } | ||
954 | |||
955 | return 0; | ||
956 | } | ||
957 | |||
/*
 * Stop the rev. 2.3.3 clocks in enable order.  The reset lines are left
 * as-is here; qcom_pcie_init_2_3_3() re-asserts them on its own error
 * path.
 */
static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
968 | |||
969 | static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) | ||
970 | { | ||
971 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
972 | struct dw_pcie *pci = pcie->pci; | ||
973 | struct device *dev = pci->dev; | ||
974 | int i, ret; | ||
975 | u32 val; | ||
976 | |||
977 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) { | ||
978 | ret = reset_control_assert(res->rst[i]); | ||
979 | if (ret) { | ||
980 | dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); | ||
981 | return ret; | ||
982 | } | ||
983 | } | ||
984 | |||
985 | usleep_range(2000, 2500); | ||
986 | |||
987 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) { | ||
988 | ret = reset_control_deassert(res->rst[i]); | ||
989 | if (ret) { | ||
990 | dev_err(dev, "reset #%d deassert failed (%d)\n", i, | ||
991 | ret); | ||
992 | return ret; | ||
993 | } | ||
994 | } | ||
995 | |||
996 | /* | ||
997 | * Don't have a way to see if the reset has completed. | ||
998 | * Wait for some time. | ||
999 | */ | ||
1000 | usleep_range(2000, 2500); | ||
1001 | |||
1002 | ret = clk_prepare_enable(res->iface); | ||
1003 | if (ret) { | ||
1004 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
1005 | goto err_clk_iface; | ||
1006 | } | ||
1007 | |||
1008 | ret = clk_prepare_enable(res->axi_m_clk); | ||
1009 | if (ret) { | ||
1010 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
1011 | goto err_clk_axi_m; | ||
1012 | } | ||
1013 | |||
1014 | ret = clk_prepare_enable(res->axi_s_clk); | ||
1015 | if (ret) { | ||
1016 | dev_err(dev, "cannot prepare/enable axi slave clock\n"); | ||
1017 | goto err_clk_axi_s; | ||
1018 | } | ||
1019 | |||
1020 | ret = clk_prepare_enable(res->ahb_clk); | ||
1021 | if (ret) { | ||
1022 | dev_err(dev, "cannot prepare/enable ahb clock\n"); | ||
1023 | goto err_clk_ahb; | ||
1024 | } | ||
1025 | |||
1026 | ret = clk_prepare_enable(res->aux_clk); | ||
1027 | if (ret) { | ||
1028 | dev_err(dev, "cannot prepare/enable aux clock\n"); | ||
1029 | goto err_clk_aux; | ||
1030 | } | ||
1031 | |||
1032 | writel(SLV_ADDR_SPACE_SZ, | ||
1033 | pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); | ||
1034 | |||
1035 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
1036 | val &= ~BIT(0); | ||
1037 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
1038 | |||
1039 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
1040 | |||
1041 | writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | ||
1042 | | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | | ||
1043 | AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, | ||
1044 | pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
1045 | writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); | ||
1046 | |||
1047 | writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); | ||
1048 | writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); | ||
1049 | writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); | ||
1050 | |||
1051 | val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); | ||
1052 | val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; | ||
1053 | writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); | ||
1054 | |||
1055 | writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + | ||
1056 | PCIE20_DEVICE_CONTROL2_STATUS2); | ||
1057 | |||
1058 | return 0; | ||
1059 | |||
1060 | err_clk_aux: | ||
1061 | clk_disable_unprepare(res->ahb_clk); | ||
1062 | err_clk_ahb: | ||
1063 | clk_disable_unprepare(res->axi_s_clk); | ||
1064 | err_clk_axi_s: | ||
1065 | clk_disable_unprepare(res->axi_m_clk); | ||
1066 | err_clk_axi_m: | ||
1067 | clk_disable_unprepare(res->iface); | ||
1068 | err_clk_iface: | ||
1069 | /* | ||
1070 | * Not checking for failure, will anyway return | ||
1071 | * the original failure in 'ret'. | ||
1072 | */ | ||
1073 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) | ||
1074 | reset_control_assert(res->rst[i]); | ||
1075 | |||
1076 | return ret; | ||
1077 | } | ||
1078 | |||
1079 | static int qcom_pcie_link_up(struct dw_pcie *pci) | ||
1080 | { | ||
1081 | u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); | ||
1082 | |||
1083 | return !!(val & PCI_EXP_LNKSTA_DLLLA); | ||
1084 | } | ||
1085 | |||
1086 | static int qcom_pcie_host_init(struct pcie_port *pp) | ||
1087 | { | ||
1088 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
1089 | struct qcom_pcie *pcie = to_qcom_pcie(pci); | ||
1090 | int ret; | ||
1091 | |||
1092 | pm_runtime_get_sync(pci->dev); | ||
1093 | qcom_ep_reset_assert(pcie); | ||
1094 | |||
1095 | ret = pcie->ops->init(pcie); | ||
1096 | if (ret) | ||
1097 | return ret; | ||
1098 | |||
1099 | ret = phy_power_on(pcie->phy); | ||
1100 | if (ret) | ||
1101 | goto err_deinit; | ||
1102 | |||
1103 | if (pcie->ops->post_init) { | ||
1104 | ret = pcie->ops->post_init(pcie); | ||
1105 | if (ret) | ||
1106 | goto err_disable_phy; | ||
1107 | } | ||
1108 | |||
1109 | dw_pcie_setup_rc(pp); | ||
1110 | |||
1111 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
1112 | dw_pcie_msi_init(pp); | ||
1113 | |||
1114 | qcom_ep_reset_deassert(pcie); | ||
1115 | |||
1116 | ret = qcom_pcie_establish_link(pcie); | ||
1117 | if (ret) | ||
1118 | goto err; | ||
1119 | |||
1120 | return 0; | ||
1121 | err: | ||
1122 | qcom_ep_reset_assert(pcie); | ||
1123 | if (pcie->ops->post_deinit) | ||
1124 | pcie->ops->post_deinit(pcie); | ||
1125 | err_disable_phy: | ||
1126 | phy_power_off(pcie->phy); | ||
1127 | err_deinit: | ||
1128 | pcie->ops->deinit(pcie); | ||
1129 | pm_runtime_put(pci->dev); | ||
1130 | |||
1131 | return ret; | ||
1132 | } | ||
1133 | |||
1134 | static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
1135 | u32 *val) | ||
1136 | { | ||
1137 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
1138 | |||
1139 | /* the device class is not reported correctly from the register */ | ||
1140 | if (where == PCI_CLASS_REVISION && size == 4) { | ||
1141 | *val = readl(pci->dbi_base + PCI_CLASS_REVISION); | ||
1142 | *val &= 0xff; /* keep revision id */ | ||
1143 | *val |= PCI_CLASS_BRIDGE_PCI << 16; | ||
1144 | return PCIBIOS_SUCCESSFUL; | ||
1145 | } | ||
1146 | |||
1147 | return dw_pcie_read(pci->dbi_base + where, size, val); | ||
1148 | } | ||
1149 | |||
/* DWC host hooks shared by every Qcom IP revision. */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	/* rev. 1.0.0 starts the LTSSM the same way as 2.1.0 */
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	/* post_init/post_deinit handle the PHY-sourced pipe clock */
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Generic DWC core callbacks. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
1200 | |||
1201 | static int qcom_pcie_probe(struct platform_device *pdev) | ||
1202 | { | ||
1203 | struct device *dev = &pdev->dev; | ||
1204 | struct resource *res; | ||
1205 | struct pcie_port *pp; | ||
1206 | struct dw_pcie *pci; | ||
1207 | struct qcom_pcie *pcie; | ||
1208 | int ret; | ||
1209 | |||
1210 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
1211 | if (!pcie) | ||
1212 | return -ENOMEM; | ||
1213 | |||
1214 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
1215 | if (!pci) | ||
1216 | return -ENOMEM; | ||
1217 | |||
1218 | pm_runtime_enable(dev); | ||
1219 | pci->dev = dev; | ||
1220 | pci->ops = &dw_pcie_ops; | ||
1221 | pp = &pci->pp; | ||
1222 | |||
1223 | pcie->pci = pci; | ||
1224 | |||
1225 | pcie->ops = of_device_get_match_data(dev); | ||
1226 | |||
1227 | pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); | ||
1228 | if (IS_ERR(pcie->reset)) | ||
1229 | return PTR_ERR(pcie->reset); | ||
1230 | |||
1231 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); | ||
1232 | pcie->parf = devm_ioremap_resource(dev, res); | ||
1233 | if (IS_ERR(pcie->parf)) | ||
1234 | return PTR_ERR(pcie->parf); | ||
1235 | |||
1236 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
1237 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); | ||
1238 | if (IS_ERR(pci->dbi_base)) | ||
1239 | return PTR_ERR(pci->dbi_base); | ||
1240 | |||
1241 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); | ||
1242 | pcie->elbi = devm_ioremap_resource(dev, res); | ||
1243 | if (IS_ERR(pcie->elbi)) | ||
1244 | return PTR_ERR(pcie->elbi); | ||
1245 | |||
1246 | pcie->phy = devm_phy_optional_get(dev, "pciephy"); | ||
1247 | if (IS_ERR(pcie->phy)) | ||
1248 | return PTR_ERR(pcie->phy); | ||
1249 | |||
1250 | ret = pcie->ops->get_resources(pcie); | ||
1251 | if (ret) | ||
1252 | return ret; | ||
1253 | |||
1254 | pp->root_bus_nr = -1; | ||
1255 | pp->ops = &qcom_pcie_dw_ops; | ||
1256 | |||
1257 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
1258 | pp->msi_irq = platform_get_irq_byname(pdev, "msi"); | ||
1259 | if (pp->msi_irq < 0) | ||
1260 | return pp->msi_irq; | ||
1261 | } | ||
1262 | |||
1263 | ret = phy_init(pcie->phy); | ||
1264 | if (ret) { | ||
1265 | pm_runtime_disable(&pdev->dev); | ||
1266 | return ret; | ||
1267 | } | ||
1268 | |||
1269 | platform_set_drvdata(pdev, pcie); | ||
1270 | |||
1271 | ret = dw_pcie_host_init(pp); | ||
1272 | if (ret) { | ||
1273 | dev_err(dev, "cannot initialize host\n"); | ||
1274 | pm_runtime_disable(&pdev->dev); | ||
1275 | return ret; | ||
1276 | } | ||
1277 | |||
1278 | return 0; | ||
1279 | } | ||
1280 | |||
/* DT compatibles, each mapped to the ops table for its Qcom IP revision. */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ }
};
1290 | |||
/* Built-in only driver (no remove); unbinding via sysfs is suppressed. */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c new file mode 100644 index 000000000000..ecb58f7b7566 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c | |||
@@ -0,0 +1,314 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs | ||
4 | * | ||
5 | * SPEAr13xx PCIe Glue Layer Source Code | ||
6 | * | ||
7 | * Copyright (C) 2010-2014 ST Microelectronics | ||
8 | * Pratyush Anand <pratyush.anand@gmail.com> | ||
9 | * Mohit Kumar <mohit.kumar.dhaka@gmail.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/clk.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/phy/phy.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/resource.h> | ||
21 | |||
22 | #include "pcie-designware.h" | ||
23 | |||
/*
 * Per-controller glue state for the SPEAr13xx DesignWare PCIe wrapper.
 */
struct spear13xx_pcie {
	struct dw_pcie *pci;	/* underlying DesignWare core instance */
	void __iomem *app_base;	/* application (glue) registers; dbi_base + 0x2000 */
	struct phy *phy;	/* "pcie-phy" from DT */
	struct clk *clk;	/* controller clock, enabled for the device lifetime */
	bool is_gen1;		/* DT "st,pcie-is-gen1": force link to 2.5 GT/s */
};
31 | |||
/*
 * Layout of the SPEAr13xx PCIe application ("glue") register block,
 * accessed via spear13xx_pcie->app_base. Each field is one 32-bit
 * register; the crN comments give the hardware register index.
 */
struct pcie_app_reg {
	u32 app_ctrl_0;		/* cr0 */
	u32 app_ctrl_1;		/* cr1 */
	u32 app_status_0;	/* cr2 */
	u32 app_status_1;	/* cr3 */
	u32 msg_status;		/* cr4 */
	u32 msg_payload;	/* cr5 */
	u32 int_sts;		/* cr6 */
	u32 int_clr;		/* cr7 */
	u32 int_mask;		/* cr8 */
	u32 mst_bmisc;		/* cr9 */
	u32 phy_ctrl;		/* cr10 */
	u32 phy_status;		/* cr11 */
	u32 cxpl_debug_info_0;	/* cr12 */
	u32 cxpl_debug_info_1;	/* cr13 */
	u32 ven_msg_ctrl_0;	/* cr14 */
	u32 ven_msg_ctrl_1;	/* cr15 */
	u32 ven_msg_data_0;	/* cr16 */
	u32 ven_msg_data_1;	/* cr17 */
	u32 ven_msi_0;		/* cr18 */
	u32 ven_msi_1;		/* cr19 */
	u32 mst_rmisc;		/* cr20 */
};
55 | |||
/* CR0 ID — bit positions / field values in app_ctrl_0 */
#define APP_LTSSM_ENABLE_ID			3	/* bit: start link training */
#define DEVICE_TYPE_RC				(4 << 25)	/* device-type field: root complex */
#define MISCTRL_EN_ID				30	/* bit: enable misc control */
#define REG_TRANSLATION_ENABLE			31	/* bit: enable register translation */

/* CR3 ID — app_status_1 */
#define XMLH_LINK_UP				(1 << 6)	/* link-up status */

/* CR6 — int_sts/int_clr/int_mask */
#define MSI_CTRL_INT				(1 << 26)	/* MSI controller interrupt */

/* PCIe capability lives at a fixed config offset in this core — TODO confirm
 * against the SPEAr13xx DWC configuration. */
#define EXP_CAP_ID_OFFSET		0x70

/* drvdata set in probe is the struct spear13xx_pcie itself */
#define to_spear13xx_pcie(x)	dev_get_drvdata((x)->dev)
71 | |||
/*
 * Bring up the PCIe link: program the RC, apply SPEAr-specific config
 * fixups, enable the LTSSM, then wait for link-up.
 *
 * Returns 0 on success (or if the link was already up), or the error
 * from dw_pcie_wait_for_link() on timeout.
 */
static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
{
	struct dw_pcie *pci = spear13xx_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
	u32 val;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(pci->dev, "link already up\n");
		return 0;
	}

	dw_pcie_setup_rc(pp);

	/*
	 * this controller support only 128 bytes read size, however its
	 * default value in capability register is 512 bytes. So force
	 * it to 128 here.
	 */
	/* Clearing the READRQ field encodes the minimum (128-byte) size. */
	dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);

	/* Override the core's IDs with ST's vendor/device IDs. */
	dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
	dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);

	/*
	 * if is_gen1 is set then handle it, so that some buggy card
	 * also works
	 */
	if (spear13xx_pcie->is_gen1) {
		/* Cap the advertised supported link speed at 2.5 GT/s. */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &val);
		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, val);
		}

		/*
		 * NOTE(review): LNKCTL2 is masked with the LNKCAP SLS mask;
		 * this relies on the target-link-speed field occupying the
		 * same bit positions — confirm against PCI_EXP_LNKCTL2_TLS.
		 */
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &val);
		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, val);
		}
	}

	/* enable ltssm */
	writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
			| (1 << APP_LTSSM_ENABLE_ID)
			| ((u32)1 << REG_TRANSLATION_ENABLE),
			&app_reg->app_ctrl_0);

	return dw_pcie_wait_for_link(pci);
}
131 | |||
/*
 * Top-level interrupt handler: dispatches MSI controller interrupts to
 * the DesignWare MSI demultiplexer, then acks everything that fired.
 */
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
	struct spear13xx_pcie *spear13xx_pcie = arg;
	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
	struct dw_pcie *pci = spear13xx_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	unsigned int status;

	status = readl(&app_reg->int_sts);

	if (status & MSI_CTRL_INT) {
		/* MSI interrupts should be impossible with CONFIG_PCI_MSI off. */
		BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
		dw_handle_msi_irq(pp);
	}

	/* Write the raw status back to int_clr to acknowledge all sources. */
	writel(status, &app_reg->int_clr);

	return IRQ_HANDLED;
}
151 | |||
/*
 * Unmask the interrupts this driver handles. Currently that is only the
 * MSI controller interrupt, and only when MSI support is built in.
 */
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
	struct dw_pcie *pci = spear13xx_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;

	/* Enable MSI interrupt */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		dw_pcie_msi_init(pp);
		/* Setting the bit in int_mask enables (unmasks) the source here. */
		writel(readl(&app_reg->int_mask) |
				MSI_CTRL_INT, &app_reg->int_mask);
	}
}
165 | |||
166 | static int spear13xx_pcie_link_up(struct dw_pcie *pci) | ||
167 | { | ||
168 | struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); | ||
169 | struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; | ||
170 | |||
171 | if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) | ||
172 | return 1; | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
/*
 * dw_pcie_host_ops .host_init callback: bring the link up and unmask
 * interrupts. The link-training result is deliberately ignored so the
 * host registers even with no device present; always returns 0.
 */
static int spear13xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);

	spear13xx_pcie_establish_link(spear13xx_pcie);
	spear13xx_pcie_enable_interrupts(spear13xx_pcie);

	return 0;
}
187 | |||
/* Host-side callbacks handed to the DesignWare core in dw_pcie_host_init(). */
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
	.host_init = spear13xx_pcie_host_init,
};
191 | |||
/*
 * Wire up the controller interrupt and register the root port with the
 * DesignWare host core.
 *
 * Returns 0 on success or a negative errno (missing IRQ, request_irq
 * failure, or dw_pcie_host_init() failure).
 */
static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
				   struct platform_device *pdev)
{
	struct dw_pcie *pci = spear13xx_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	pp->irq = platform_get_irq(pdev, 0);
	if (pp->irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return pp->irq;
	}
	/* IRQF_NO_THREAD: the handler demuxes MSIs and must not be threaded. */
	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "spear1340-pcie", spear13xx_pcie);
	if (ret) {
		dev_err(dev, "failed to request irq %d\n", pp->irq);
		return ret;
	}

	/* -1: let the core assign the root bus number. */
	pp->root_bus_nr = -1;
	pp->ops = &spear13xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
224 | |||
/* Core callbacks for the DesignWare layer; only link-status is overridden. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = spear13xx_pcie_link_up,
};
228 | |||
229 | static int spear13xx_pcie_probe(struct platform_device *pdev) | ||
230 | { | ||
231 | struct device *dev = &pdev->dev; | ||
232 | struct dw_pcie *pci; | ||
233 | struct spear13xx_pcie *spear13xx_pcie; | ||
234 | struct device_node *np = dev->of_node; | ||
235 | struct resource *dbi_base; | ||
236 | int ret; | ||
237 | |||
238 | spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); | ||
239 | if (!spear13xx_pcie) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
243 | if (!pci) | ||
244 | return -ENOMEM; | ||
245 | |||
246 | pci->dev = dev; | ||
247 | pci->ops = &dw_pcie_ops; | ||
248 | |||
249 | spear13xx_pcie->pci = pci; | ||
250 | |||
251 | spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); | ||
252 | if (IS_ERR(spear13xx_pcie->phy)) { | ||
253 | ret = PTR_ERR(spear13xx_pcie->phy); | ||
254 | if (ret == -EPROBE_DEFER) | ||
255 | dev_info(dev, "probe deferred\n"); | ||
256 | else | ||
257 | dev_err(dev, "couldn't get pcie-phy\n"); | ||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | phy_init(spear13xx_pcie->phy); | ||
262 | |||
263 | spear13xx_pcie->clk = devm_clk_get(dev, NULL); | ||
264 | if (IS_ERR(spear13xx_pcie->clk)) { | ||
265 | dev_err(dev, "couldn't get clk for pcie\n"); | ||
266 | return PTR_ERR(spear13xx_pcie->clk); | ||
267 | } | ||
268 | ret = clk_prepare_enable(spear13xx_pcie->clk); | ||
269 | if (ret) { | ||
270 | dev_err(dev, "couldn't enable clk for pcie\n"); | ||
271 | return ret; | ||
272 | } | ||
273 | |||
274 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
275 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); | ||
276 | if (IS_ERR(pci->dbi_base)) { | ||
277 | dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); | ||
278 | ret = PTR_ERR(pci->dbi_base); | ||
279 | goto fail_clk; | ||
280 | } | ||
281 | spear13xx_pcie->app_base = pci->dbi_base + 0x2000; | ||
282 | |||
283 | if (of_property_read_bool(np, "st,pcie-is-gen1")) | ||
284 | spear13xx_pcie->is_gen1 = true; | ||
285 | |||
286 | platform_set_drvdata(pdev, spear13xx_pcie); | ||
287 | |||
288 | ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); | ||
289 | if (ret < 0) | ||
290 | goto fail_clk; | ||
291 | |||
292 | return 0; | ||
293 | |||
294 | fail_clk: | ||
295 | clk_disable_unprepare(spear13xx_pcie->clk); | ||
296 | |||
297 | return ret; | ||
298 | } | ||
299 | |||
/* Only the SPEAr1340 variant of this controller is supported. */
static const struct of_device_id spear13xx_pcie_of_match[] = {
	{ .compatible = "st,spear1340-pcie", },
	{},
};
304 | |||
static struct platform_driver spear13xx_pcie_driver = {
	.probe		= spear13xx_pcie_probe,
	.driver = {
		.name	= "spear-pcie",
		.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
		/* Host bridge cannot be unbound safely; hide bind/unbind in sysfs. */
		.suppress_bind_attrs = true,
	},
};

/* No .remove: built-in only, registered without module unload support. */
builtin_platform_driver(spear13xx_pcie_driver);