aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/dwc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/pci/dwc')
-rw-r--r--drivers/pci/dwc/Kconfig11
-rw-r--r--drivers/pci/dwc/Makefile1
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c6
-rw-r--r--drivers/pci/dwc/pci-exynos.c2
-rw-r--r--drivers/pci/dwc/pci-imx6.c72
-rw-r--r--drivers/pci/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/dwc/pci-layerscape.c6
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c2
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c5
-rw-r--r--drivers/pci/dwc/pcie-designware.h2
-rw-r--r--drivers/pci/dwc/pcie-kirin.c517
-rw-r--r--drivers/pci/dwc/pcie-qcom.c440
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c2
14 files changed, 991 insertions, 79 deletions
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index b7e15526d676..d275aadc47ee 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -16,6 +16,7 @@ config PCIE_DW_EP
16 16
17config PCI_DRA7XX 17config PCI_DRA7XX
18 bool "TI DRA7xx PCIe controller" 18 bool "TI DRA7xx PCIe controller"
19 depends on SOC_DRA7XX || COMPILE_TEST
19 depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT 20 depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
20 depends on OF && HAS_IOMEM && TI_PIPE3 21 depends on OF && HAS_IOMEM && TI_PIPE3
21 help 22 help
@@ -158,4 +159,14 @@ config PCIE_ARTPEC6
158 Say Y here to enable PCIe controller support on Axis ARTPEC-6 159 Say Y here to enable PCIe controller support on Axis ARTPEC-6
159 SoCs. This PCIe controller uses the DesignWare core. 160 SoCs. This PCIe controller uses the DesignWare core.
160 161
162config PCIE_KIRIN
163 depends on OF && ARM64
164 bool "HiSilicon Kirin series SoCs PCIe controllers"
165 depends on PCI
166 select PCIEPORTBUS
167 select PCIE_DW_HOST
168 help
169 Say Y here if you want PCIe controller support
170 on HiSilicon Kirin series SoCs.
171
161endmenu 172endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index f31a8596442a..c61be9738cce 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
13obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o 13obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
14obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o 14obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
15obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o 15obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
16obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
16 17
17# The following drivers are for devices that use the generic ACPI 18# The following drivers are for devices that use the generic ACPI
18# pci_root.c driver but don't support standard ECAM config access. 19# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 8decf46cf525..f2fc5f47064e 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
174static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) 174static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
175{ 175{
176 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, 176 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
177 ~LEG_EP_INTERRUPTS & ~MSI); 177 LEG_EP_INTERRUPTS | MSI);
178 178
179 dra7xx_pcie_writel(dra7xx, 179 dra7xx_pcie_writel(dra7xx,
180 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, 180 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
@@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
184static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) 184static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
185{ 185{
186 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, 186 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
187 ~INTERRUPTS); 187 INTERRUPTS);
188 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, 188 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
189 INTERRUPTS); 189 INTERRUPTS);
190} 190}
@@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
208 dra7xx_pcie_enable_interrupts(dra7xx); 208 dra7xx_pcie_enable_interrupts(dra7xx);
209} 209}
210 210
211static struct dw_pcie_host_ops dra7xx_pcie_host_ops = { 211static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
212 .host_init = dra7xx_pcie_host_init, 212 .host_init = dra7xx_pcie_host_init,
213}; 213};
214 214
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 546082ad5a3f..c78c06552590 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp)
590 exynos_pcie_enable_interrupts(ep); 590 exynos_pcie_enable_interrupts(ep);
591} 591}
592 592
593static struct dw_pcie_host_ops exynos_pcie_host_ops = { 593static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
594 .rd_own_conf = exynos_pcie_rd_own_conf, 594 .rd_own_conf = exynos_pcie_rd_own_conf,
595 .wr_own_conf = exynos_pcie_wr_own_conf, 595 .wr_own_conf = exynos_pcie_wr_own_conf,
596 .host_init = exynos_pcie_host_init, 596 .host_init = exynos_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index a98cba55c7f0..bf5c3616e344 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -24,6 +24,7 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/regmap.h> 26#include <linux/regmap.h>
27#include <linux/regulator/consumer.h>
27#include <linux/resource.h> 28#include <linux/resource.h>
28#include <linux/signal.h> 29#include <linux/signal.h>
29#include <linux/types.h> 30#include <linux/types.h>
@@ -59,6 +60,7 @@ struct imx6_pcie {
59 u32 tx_swing_full; 60 u32 tx_swing_full;
60 u32 tx_swing_low; 61 u32 tx_swing_low;
61 int link_gen; 62 int link_gen;
63 struct regulator *vpcie;
62}; 64};
63 65
64/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ 66/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -252,11 +254,40 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
252static int imx6q_pcie_abort_handler(unsigned long addr, 254static int imx6q_pcie_abort_handler(unsigned long addr,
253 unsigned int fsr, struct pt_regs *regs) 255 unsigned int fsr, struct pt_regs *regs)
254{ 256{
255 return 0; 257 unsigned long pc = instruction_pointer(regs);
258 unsigned long instr = *(unsigned long *)pc;
259 int reg = (instr >> 12) & 15;
260
261 /*
262 * If the instruction being executed was a read,
263 * make it look like it read all-ones.
264 */
265 if ((instr & 0x0c100000) == 0x04100000) {
266 unsigned long val;
267
268 if (instr & 0x00400000)
269 val = 255;
270 else
271 val = -1;
272
273 regs->uregs[reg] = val;
274 regs->ARM_pc += 4;
275 return 0;
276 }
277
278 if ((instr & 0x0e100090) == 0x00100090) {
279 regs->uregs[reg] = -1;
280 regs->ARM_pc += 4;
281 return 0;
282 }
283
284 return 1;
256} 285}
257 286
258static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) 287static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
259{ 288{
289 struct device *dev = imx6_pcie->pci->dev;
290
260 switch (imx6_pcie->variant) { 291 switch (imx6_pcie->variant) {
261 case IMX7D: 292 case IMX7D:
262 reset_control_assert(imx6_pcie->pciephy_reset); 293 reset_control_assert(imx6_pcie->pciephy_reset);
@@ -283,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
283 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); 314 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
284 break; 315 break;
285 } 316 }
317
318 if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
319 int ret = regulator_disable(imx6_pcie->vpcie);
320
321 if (ret)
322 dev_err(dev, "failed to disable vpcie regulator: %d\n",
323 ret);
324 }
286} 325}
287 326
288static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) 327static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
@@ -349,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
349 struct device *dev = pci->dev; 388 struct device *dev = pci->dev;
350 int ret; 389 int ret;
351 390
391 if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
392 ret = regulator_enable(imx6_pcie->vpcie);
393 if (ret) {
394 dev_err(dev, "failed to enable vpcie regulator: %d\n",
395 ret);
396 return;
397 }
398 }
399
352 ret = clk_prepare_enable(imx6_pcie->pcie_phy); 400 ret = clk_prepare_enable(imx6_pcie->pcie_phy);
353 if (ret) { 401 if (ret) {
354 dev_err(dev, "unable to enable pcie_phy clock\n"); 402 dev_err(dev, "unable to enable pcie_phy clock\n");
355 return; 403 goto err_pcie_phy;
356 } 404 }
357 405
358 ret = clk_prepare_enable(imx6_pcie->pcie_bus); 406 ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -412,6 +460,13 @@ err_pcie:
412 clk_disable_unprepare(imx6_pcie->pcie_bus); 460 clk_disable_unprepare(imx6_pcie->pcie_bus);
413err_pcie_bus: 461err_pcie_bus:
414 clk_disable_unprepare(imx6_pcie->pcie_phy); 462 clk_disable_unprepare(imx6_pcie->pcie_phy);
463err_pcie_phy:
464 if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
465 ret = regulator_disable(imx6_pcie->vpcie);
466 if (ret)
467 dev_err(dev, "failed to disable vpcie regulator: %d\n",
468 ret);
469 }
415} 470}
416 471
417static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) 472static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
@@ -602,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci)
602 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; 657 PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
603} 658}
604 659
605static struct dw_pcie_host_ops imx6_pcie_host_ops = { 660static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
606 .host_init = imx6_pcie_host_init, 661 .host_init = imx6_pcie_host_init,
607}; 662};
608 663
@@ -775,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
775 if (ret) 830 if (ret)
776 imx6_pcie->link_gen = 1; 831 imx6_pcie->link_gen = 1;
777 832
833 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
834 if (IS_ERR(imx6_pcie->vpcie)) {
835 if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
836 return -EPROBE_DEFER;
837 imx6_pcie->vpcie = NULL;
838 }
839
778 platform_set_drvdata(pdev, imx6_pcie); 840 platform_set_drvdata(pdev, imx6_pcie);
779 841
780 ret = imx6_add_pcie_port(imx6_pcie, pdev); 842 ret = imx6_add_pcie_port(imx6_pcie, pdev);
@@ -819,8 +881,8 @@ static int __init imx6_pcie_init(void)
819 * we can install the handler here without risking it 881 * we can install the handler here without risking it
820 * accessing some uninitialized driver state. 882 * accessing some uninitialized driver state.
821 */ 883 */
822 hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, 884 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
823 "imprecise external abort"); 885 "external abort on non-linefetch");
824 886
825 return platform_driver_register(&imx6_pcie_driver); 887 return platform_driver_register(&imx6_pcie_driver);
826} 888}
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index fcc9723bad6e..4783cec1f78d 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
291 "Asynchronous external abort"); 291 "Asynchronous external abort");
292} 292}
293 293
294static struct dw_pcie_host_ops keystone_pcie_host_ops = { 294static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
295 .rd_other_conf = ks_dw_pcie_rd_other_conf, 295 .rd_other_conf = ks_dw_pcie_rd_other_conf,
296 .wr_other_conf = ks_dw_pcie_wr_other_conf, 296 .wr_other_conf = ks_dw_pcie_wr_other_conf,
297 .host_init = ks_pcie_host_init, 297 .host_init = ks_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 27d638c4e134..fd861289ad8b 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -39,7 +39,7 @@ struct ls_pcie_drvdata {
39 u32 lut_offset; 39 u32 lut_offset;
40 u32 ltssm_shift; 40 u32 ltssm_shift;
41 u32 lut_dbg; 41 u32 lut_dbg;
42 struct dw_pcie_host_ops *ops; 42 const struct dw_pcie_host_ops *ops;
43 const struct dw_pcie_ops *dw_pcie_ops; 43 const struct dw_pcie_ops *dw_pcie_ops;
44}; 44};
45 45
@@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
185 return 0; 185 return 0;
186} 186}
187 187
188static struct dw_pcie_host_ops ls1021_pcie_host_ops = { 188static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
189 .host_init = ls1021_pcie_host_init, 189 .host_init = ls1021_pcie_host_init,
190 .msi_host_init = ls_pcie_msi_host_init, 190 .msi_host_init = ls_pcie_msi_host_init,
191}; 191};
192 192
193static struct dw_pcie_host_ops ls_pcie_host_ops = { 193static const struct dw_pcie_host_ops ls_pcie_host_ops = {
194 .host_init = ls_pcie_host_init, 194 .host_init = ls_pcie_host_init,
195 .msi_host_init = ls_pcie_msi_host_init, 195 .msi_host_init = ls_pcie_msi_host_init,
196}; 196};
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index 495b023042b3..ea8f34af6a85 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
160 return IRQ_HANDLED; 160 return IRQ_HANDLED;
161} 161}
162 162
163static struct dw_pcie_host_ops armada8k_pcie_host_ops = { 163static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
164 .host_init = armada8k_pcie_host_init, 164 .host_init = armada8k_pcie_host_init,
165}; 165};
166 166
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 82a04acc42fd..01c6f7823672 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp)
184 artpec6_pcie_enable_interrupts(artpec6_pcie); 184 artpec6_pcie_enable_interrupts(artpec6_pcie);
185} 185}
186 186
187static struct dw_pcie_host_ops artpec6_pcie_host_ops = { 187static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
188 .host_init = artpec6_pcie_host_init, 188 .host_init = artpec6_pcie_host_init,
189}; 189};
190 190
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 32091b32f6e1..091b4e7ad059 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
46 dw_pcie_msi_init(pp); 46 dw_pcie_msi_init(pp);
47} 47}
48 48
49static struct dw_pcie_host_ops dw_plat_pcie_host_ops = { 49static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
50 .host_init = dw_plat_pcie_host_init, 50 .host_init = dw_plat_pcie_host_init,
51}; 51};
52 52
@@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
67 67
68 ret = devm_request_irq(dev, pp->msi_irq, 68 ret = devm_request_irq(dev, pp->msi_irq,
69 dw_plat_pcie_msi_irq_handler, 69 dw_plat_pcie_msi_irq_handler,
70 IRQF_SHARED, "dw-plat-pcie-msi", pp); 70 IRQF_SHARED | IRQF_NO_THREAD,
71 "dw-plat-pcie-msi", pp);
71 if (ret) { 72 if (ret) {
72 dev_err(dev, "failed to request MSI IRQ\n"); 73 dev_err(dev, "failed to request MSI IRQ\n");
73 return ret; 74 return ret;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index c6a840575796..b4d2a89f8e58 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -162,7 +162,7 @@ struct pcie_port {
162 struct resource *mem; 162 struct resource *mem;
163 struct resource *busn; 163 struct resource *busn;
164 int irq; 164 int irq;
165 struct dw_pcie_host_ops *ops; 165 const struct dw_pcie_host_ops *ops;
166 int msi_irq; 166 int msi_irq;
167 struct irq_domain *irq_domain; 167 struct irq_domain *irq_domain;
168 unsigned long msi_data; 168 unsigned long msi_data;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
new file mode 100644
index 000000000000..33fddb9f6739
--- /dev/null
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -0,0 +1,517 @@
1/*
2 * PCIe host controller driver for Kirin Phone SoCs
3 *
4 * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
5 * http://www.huawei.com
6 *
7 * Author: Xiaowei Song <songxiaowei@huawei.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <asm/compiler.h>
15#include <linux/compiler.h>
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/gpio.h>
20#include <linux/interrupt.h>
21#include <linux/mfd/syscon.h>
22#include <linux/of_address.h>
23#include <linux/of_gpio.h>
24#include <linux/of_pci.h>
25#include <linux/pci.h>
26#include <linux/pci_regs.h>
27#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/resource.h>
30#include <linux/types.h>
31#include "pcie-designware.h"
32
33#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
34
35#define REF_CLK_FREQ 100000000
36
37/* PCIe ELBI registers */
38#define SOC_PCIECTRL_CTRL0_ADDR 0x000
39#define SOC_PCIECTRL_CTRL1_ADDR 0x004
40#define SOC_PCIEPHY_CTRL2_ADDR 0x008
41#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
42#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
43
44/* info located in APB */
45#define PCIE_APP_LTSSM_ENABLE 0x01c
46#define PCIE_APB_PHY_CTRL0 0x0
47#define PCIE_APB_PHY_CTRL1 0x4
48#define PCIE_APB_PHY_STATUS0 0x400
49#define PCIE_LINKUP_ENABLE (0x8020)
50#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
51#define PIPE_CLK_STABLE (0x1 << 19)
52#define PHY_REF_PAD_BIT (0x1 << 8)
53#define PHY_PWR_DOWN_BIT (0x1 << 22)
54#define PHY_RST_ACK_BIT (0x1 << 16)
55
56/* info located in sysctrl */
57#define SCTRL_PCIE_CMOS_OFFSET 0x60
58#define SCTRL_PCIE_CMOS_BIT 0x10
59#define SCTRL_PCIE_ISO_OFFSET 0x44
60#define SCTRL_PCIE_ISO_BIT 0x30
61#define SCTRL_PCIE_HPCLK_OFFSET 0x190
62#define SCTRL_PCIE_HPCLK_BIT 0x184000
63#define SCTRL_PCIE_OE_OFFSET 0x14a
64#define PCIE_DEBOUNCE_PARAM 0xF0F400
65#define PCIE_OE_BYPASS (0x3 << 28)
66
67/* peri_crg ctrl */
68#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
69#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
70
71/* Time for delay */
72#define REF_2_PERST_MIN 20000
73#define REF_2_PERST_MAX 25000
74#define PERST_2_ACCESS_MIN 10000
75#define PERST_2_ACCESS_MAX 12000
76#define LINK_WAIT_MIN 900
77#define LINK_WAIT_MAX 1000
78#define PIPE_CLK_WAIT_MIN 550
79#define PIPE_CLK_WAIT_MAX 600
80#define TIME_CMOS_MIN 100
81#define TIME_CMOS_MAX 105
82#define TIME_PHY_PD_MIN 10
83#define TIME_PHY_PD_MAX 11
84
85struct kirin_pcie {
86 struct dw_pcie *pci;
87 void __iomem *apb_base;
88 void __iomem *phy_base;
89 struct regmap *crgctrl;
90 struct regmap *sysctrl;
91 struct clk *apb_sys_clk;
92 struct clk *apb_phy_clk;
93 struct clk *phy_ref_clk;
94 struct clk *pcie_aclk;
95 struct clk *pcie_aux_clk;
96 int gpio_id_reset;
97};
98
99/* Registers in PCIeCTRL */
100static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
101 u32 val, u32 reg)
102{
103 writel(val, kirin_pcie->apb_base + reg);
104}
105
106static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
107{
108 return readl(kirin_pcie->apb_base + reg);
109}
110
111/* Registers in PCIePHY */
112static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
113 u32 val, u32 reg)
114{
115 writel(val, kirin_pcie->phy_base + reg);
116}
117
118static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
119{
120 return readl(kirin_pcie->phy_base + reg);
121}
122
123static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
124 struct platform_device *pdev)
125{
126 struct device *dev = &pdev->dev;
127
128 kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
129 if (IS_ERR(kirin_pcie->phy_ref_clk))
130 return PTR_ERR(kirin_pcie->phy_ref_clk);
131
132 kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
133 if (IS_ERR(kirin_pcie->pcie_aux_clk))
134 return PTR_ERR(kirin_pcie->pcie_aux_clk);
135
136 kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
137 if (IS_ERR(kirin_pcie->apb_phy_clk))
138 return PTR_ERR(kirin_pcie->apb_phy_clk);
139
140 kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
141 if (IS_ERR(kirin_pcie->apb_sys_clk))
142 return PTR_ERR(kirin_pcie->apb_sys_clk);
143
144 kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
145 if (IS_ERR(kirin_pcie->pcie_aclk))
146 return PTR_ERR(kirin_pcie->pcie_aclk);
147
148 return 0;
149}
150
151static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
152 struct platform_device *pdev)
153{
154 struct device *dev = &pdev->dev;
155 struct resource *apb;
156 struct resource *phy;
157 struct resource *dbi;
158
159 apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
160 kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
161 if (IS_ERR(kirin_pcie->apb_base))
162 return PTR_ERR(kirin_pcie->apb_base);
163
164 phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
165 kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
166 if (IS_ERR(kirin_pcie->phy_base))
167 return PTR_ERR(kirin_pcie->phy_base);
168
169 dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
170 kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
171 if (IS_ERR(kirin_pcie->pci->dbi_base))
172 return PTR_ERR(kirin_pcie->pci->dbi_base);
173
174 kirin_pcie->crgctrl =
175 syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
176 if (IS_ERR(kirin_pcie->crgctrl))
177 return PTR_ERR(kirin_pcie->crgctrl);
178
179 kirin_pcie->sysctrl =
180 syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
181 if (IS_ERR(kirin_pcie->sysctrl))
182 return PTR_ERR(kirin_pcie->sysctrl);
183
184 return 0;
185}
186
187static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
188{
189 struct device *dev = kirin_pcie->pci->dev;
190 u32 reg_val;
191
192 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
193 reg_val &= ~PHY_REF_PAD_BIT;
194 kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
195
196 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
197 reg_val &= ~PHY_PWR_DOWN_BIT;
198 kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
199 usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
200
201 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
202 reg_val &= ~PHY_RST_ACK_BIT;
203 kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
204
205 usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
206 reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
207 if (reg_val & PIPE_CLK_STABLE) {
208 dev_err(dev, "PIPE clk is not stable\n");
209 return -EINVAL;
210 }
211
212 return 0;
213}
214
215static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
216{
217 u32 val;
218
219 regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
220 val |= PCIE_DEBOUNCE_PARAM;
221 val &= ~PCIE_OE_BYPASS;
222 regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
223}
224
225static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
226{
227 int ret = 0;
228
229 if (!enable)
230 goto close_clk;
231
232 ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
233 if (ret)
234 return ret;
235
236 ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
237 if (ret)
238 return ret;
239
240 ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
241 if (ret)
242 goto apb_sys_fail;
243
244 ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
245 if (ret)
246 goto apb_phy_fail;
247
248 ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
249 if (ret)
250 goto aclk_fail;
251
252 ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
253 if (ret)
254 goto aux_clk_fail;
255
256 return 0;
257
258close_clk:
259 clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
260aux_clk_fail:
261 clk_disable_unprepare(kirin_pcie->pcie_aclk);
262aclk_fail:
263 clk_disable_unprepare(kirin_pcie->apb_phy_clk);
264apb_phy_fail:
265 clk_disable_unprepare(kirin_pcie->apb_sys_clk);
266apb_sys_fail:
267 clk_disable_unprepare(kirin_pcie->phy_ref_clk);
268
269 return ret;
270}
271
272static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
273{
274 int ret;
275
276 /* Power supply for Host */
277 regmap_write(kirin_pcie->sysctrl,
278 SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
279 usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
280 kirin_pcie_oe_enable(kirin_pcie);
281
282 ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
283 if (ret)
284 return ret;
285
286 /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
287 regmap_write(kirin_pcie->sysctrl,
288 SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
289 regmap_write(kirin_pcie->crgctrl,
290 CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
291 regmap_write(kirin_pcie->sysctrl,
292 SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
293
294 ret = kirin_pcie_phy_init(kirin_pcie);
295 if (ret)
296 goto close_clk;
297
298 /* perst assert Endpoint */
299 if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
300 usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
301 ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
302 if (ret)
303 goto close_clk;
304 usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
305
306 return 0;
307 }
308
309close_clk:
310 kirin_pcie_clk_ctrl(kirin_pcie, false);
311 return ret;
312}
313
314static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
315 bool on)
316{
317 u32 val;
318
319 val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
320 if (on)
321 val = val | PCIE_ELBI_SLV_DBI_ENABLE;
322 else
323 val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
324
325 kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
326}
327
328static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
329 bool on)
330{
331 u32 val;
332
333 val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
334 if (on)
335 val = val | PCIE_ELBI_SLV_DBI_ENABLE;
336 else
337 val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
338
339 kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
340}
341
342static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
343 int where, int size, u32 *val)
344{
345 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
346 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
347 int ret;
348
349 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
350 ret = dw_pcie_read(pci->dbi_base + where, size, val);
351 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
352
353 return ret;
354}
355
356static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
357 int where, int size, u32 val)
358{
359 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
360 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
361 int ret;
362
363 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
364 ret = dw_pcie_write(pci->dbi_base + where, size, val);
365 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
366
367 return ret;
368}
369
370static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
371 u32 reg, size_t size)
372{
373 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
374 u32 ret;
375
376 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
377 dw_pcie_read(base + reg, size, &ret);
378 kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
379
380 return ret;
381}
382
383static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
384 u32 reg, size_t size, u32 val)
385{
386 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
387
388 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
389 dw_pcie_write(base + reg, size, val);
390 kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
391}
392
393static int kirin_pcie_link_up(struct dw_pcie *pci)
394{
395 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
396 u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
397
398 if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
399 return 1;
400
401 return 0;
402}
403
404static int kirin_pcie_establish_link(struct pcie_port *pp)
405{
406 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
407 struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
408 struct device *dev = kirin_pcie->pci->dev;
409 int count = 0;
410
411 if (kirin_pcie_link_up(pci))
412 return 0;
413
414 dw_pcie_setup_rc(pp);
415
416 /* assert LTSSM enable */
417 kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
418 PCIE_APP_LTSSM_ENABLE);
419
420 /* check if the link is up or not */
421 while (!kirin_pcie_link_up(pci)) {
422 usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
423 count++;
424 if (count == 1000) {
425 dev_err(dev, "Link Fail\n");
426 return -EINVAL;
427 }
428 }
429
430 return 0;
431}
432
433static void kirin_pcie_host_init(struct pcie_port *pp)
434{
435 kirin_pcie_establish_link(pp);
436}
437
438static struct dw_pcie_ops kirin_dw_pcie_ops = {
439 .read_dbi = kirin_pcie_read_dbi,
440 .write_dbi = kirin_pcie_write_dbi,
441 .link_up = kirin_pcie_link_up,
442};
443
444static struct dw_pcie_host_ops kirin_pcie_host_ops = {
445 .rd_own_conf = kirin_pcie_rd_own_conf,
446 .wr_own_conf = kirin_pcie_wr_own_conf,
447 .host_init = kirin_pcie_host_init,
448};
449
450static int __init kirin_add_pcie_port(struct dw_pcie *pci,
451 struct platform_device *pdev)
452{
453 pci->pp.ops = &kirin_pcie_host_ops;
454
455 return dw_pcie_host_init(&pci->pp);
456}
457
458static int kirin_pcie_probe(struct platform_device *pdev)
459{
460 struct device *dev = &pdev->dev;
461 struct kirin_pcie *kirin_pcie;
462 struct dw_pcie *pci;
463 int ret;
464
465 if (!dev->of_node) {
466 dev_err(dev, "NULL node\n");
467 return -EINVAL;
468 }
469
470 kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
471 if (!kirin_pcie)
472 return -ENOMEM;
473
474 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
475 if (!pci)
476 return -ENOMEM;
477
478 pci->dev = dev;
479 pci->ops = &kirin_dw_pcie_ops;
480 kirin_pcie->pci = pci;
481
482 ret = kirin_pcie_get_clk(kirin_pcie, pdev);
483 if (ret)
484 return ret;
485
486 ret = kirin_pcie_get_resource(kirin_pcie, pdev);
487 if (ret)
488 return ret;
489
490 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
491 "reset-gpio", 0);
492 if (kirin_pcie->gpio_id_reset < 0)
493 return -ENODEV;
494
495 ret = kirin_pcie_power_on(kirin_pcie);
496 if (ret)
497 return ret;
498
499 platform_set_drvdata(pdev, kirin_pcie);
500
501 return kirin_add_pcie_port(pci, pdev);
502}
503
504static const struct of_device_id kirin_pcie_match[] = {
505 { .compatible = "hisilicon,kirin960-pcie" },
506 {},
507};
508
509struct platform_driver kirin_pcie_driver = {
510 .probe = kirin_pcie_probe,
511 .driver = {
512 .name = "kirin-pcie",
513 .of_match_table = kirin_pcie_match,
514 .suppress_bind_attrs = true,
515 },
516};
517builtin_platform_driver(kirin_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5bf23d432fdb..68c5f2ab5bc8 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -51,6 +51,12 @@
51#define PCIE20_ELBI_SYS_CTRL 0x04 51#define PCIE20_ELBI_SYS_CTRL 0x04
52#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) 52#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
53 53
54#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
55#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
56#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
57#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
58#define CFG_BRIDGE_SB_INIT BIT(0)
59
54#define PCIE20_CAP 0x70 60#define PCIE20_CAP 0x70
55 61
56#define PERST_DELAY_US 1000 62#define PERST_DELAY_US 1000
@@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 {
86 struct clk *pipe_clk; 92 struct clk *pipe_clk;
87}; 93};
88 94
95struct qcom_pcie_resources_v3 {
96 struct clk *aux_clk;
97 struct clk *master_clk;
98 struct clk *slave_clk;
99 struct reset_control *axi_m_reset;
100 struct reset_control *axi_s_reset;
101 struct reset_control *pipe_reset;
102 struct reset_control *axi_m_vmid_reset;
103 struct reset_control *axi_s_xpu_reset;
104 struct reset_control *parf_reset;
105 struct reset_control *phy_reset;
106 struct reset_control *axi_m_sticky_reset;
107 struct reset_control *pipe_sticky_reset;
108 struct reset_control *pwr_reset;
109 struct reset_control *ahb_reset;
110 struct reset_control *phy_ahb_reset;
111};
112
89union qcom_pcie_resources { 113union qcom_pcie_resources {
90 struct qcom_pcie_resources_v0 v0; 114 struct qcom_pcie_resources_v0 v0;
91 struct qcom_pcie_resources_v1 v1; 115 struct qcom_pcie_resources_v1 v1;
92 struct qcom_pcie_resources_v2 v2; 116 struct qcom_pcie_resources_v2 v2;
117 struct qcom_pcie_resources_v3 v3;
93}; 118};
94 119
95struct qcom_pcie; 120struct qcom_pcie;
@@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
133 return dw_handle_msi_irq(pp); 158 return dw_handle_msi_irq(pp);
134} 159}
135 160
136static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
137{
138 u32 val;
139
140 /* enable link training */
141 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
142 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
143 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
144}
145
146static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
147{
148 u32 val;
149
150 /* enable link training */
151 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
152 val |= BIT(8);
153 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
154}
155
156static int qcom_pcie_establish_link(struct qcom_pcie *pcie) 161static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
157{ 162{
158 struct dw_pcie *pci = pcie->pci; 163 struct dw_pcie *pci = pcie->pci;
@@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
167 return dw_pcie_wait_for_link(pci); 172 return dw_pcie_wait_for_link(pci);
168} 173}
169 174
175static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
176{
177 u32 val;
178
179 /* enable link training */
180 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
181 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
182 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
183}
184
170static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) 185static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
171{ 186{
172 struct qcom_pcie_resources_v0 *res = &pcie->res.v0; 187 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
217 return PTR_ERR_OR_ZERO(res->phy_reset); 232 return PTR_ERR_OR_ZERO(res->phy_reset);
218} 233}
219 234
220static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
221{
222 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
223 struct dw_pcie *pci = pcie->pci;
224 struct device *dev = pci->dev;
225
226 res->vdda = devm_regulator_get(dev, "vdda");
227 if (IS_ERR(res->vdda))
228 return PTR_ERR(res->vdda);
229
230 res->iface = devm_clk_get(dev, "iface");
231 if (IS_ERR(res->iface))
232 return PTR_ERR(res->iface);
233
234 res->aux = devm_clk_get(dev, "aux");
235 if (IS_ERR(res->aux))
236 return PTR_ERR(res->aux);
237
238 res->master_bus = devm_clk_get(dev, "master_bus");
239 if (IS_ERR(res->master_bus))
240 return PTR_ERR(res->master_bus);
241
242 res->slave_bus = devm_clk_get(dev, "slave_bus");
243 if (IS_ERR(res->slave_bus))
244 return PTR_ERR(res->slave_bus);
245
246 res->core = devm_reset_control_get(dev, "core");
247 return PTR_ERR_OR_ZERO(res->core);
248}
249
250static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) 235static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
251{ 236{
252 struct qcom_pcie_resources_v0 *res = &pcie->res.v0; 237 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
357 /* wait for clock acquisition */ 342 /* wait for clock acquisition */
358 usleep_range(1000, 1500); 343 usleep_range(1000, 1500);
359 344
345
346 /* Set the Max TLP size to 2K, instead of using default of 4K */
347 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
348 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
349 writel(CFG_BRIDGE_SB_INIT,
350 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
351
360 return 0; 352 return 0;
361 353
362err_deassert_ahb: 354err_deassert_ahb:
@@ -375,6 +367,36 @@ err_refclk:
375 return ret; 367 return ret;
376} 368}
377 369
370static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
371{
372 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
373 struct dw_pcie *pci = pcie->pci;
374 struct device *dev = pci->dev;
375
376 res->vdda = devm_regulator_get(dev, "vdda");
377 if (IS_ERR(res->vdda))
378 return PTR_ERR(res->vdda);
379
380 res->iface = devm_clk_get(dev, "iface");
381 if (IS_ERR(res->iface))
382 return PTR_ERR(res->iface);
383
384 res->aux = devm_clk_get(dev, "aux");
385 if (IS_ERR(res->aux))
386 return PTR_ERR(res->aux);
387
388 res->master_bus = devm_clk_get(dev, "master_bus");
389 if (IS_ERR(res->master_bus))
390 return PTR_ERR(res->master_bus);
391
392 res->slave_bus = devm_clk_get(dev, "slave_bus");
393 if (IS_ERR(res->slave_bus))
394 return PTR_ERR(res->slave_bus);
395
396 res->core = devm_reset_control_get(dev, "core");
397 return PTR_ERR_OR_ZERO(res->core);
398}
399
378static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) 400static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
379{ 401{
380 struct qcom_pcie_resources_v1 *res = &pcie->res.v1; 402 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
@@ -455,6 +477,16 @@ err_res:
455 return ret; 477 return ret;
456} 478}
457 479
480static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
481{
482 u32 val;
483
484 /* enable link training */
485 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
486 val |= BIT(8);
487 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
488}
489
458static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) 490static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
459{ 491{
460 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 492 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
481 return PTR_ERR_OR_ZERO(res->pipe_clk); 513 return PTR_ERR_OR_ZERO(res->pipe_clk);
482} 514}
483 515
516static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
517{
518 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
519
520 clk_disable_unprepare(res->pipe_clk);
521 clk_disable_unprepare(res->slave_clk);
522 clk_disable_unprepare(res->master_clk);
523 clk_disable_unprepare(res->cfg_clk);
524 clk_disable_unprepare(res->aux_clk);
525}
526
484static int qcom_pcie_init_v2(struct qcom_pcie *pcie) 527static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
485{ 528{
486 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 529 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
562 return 0; 605 return 0;
563} 606}
564 607
565static int qcom_pcie_link_up(struct dw_pcie *pci) 608static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
566{ 609{
567 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); 610 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
611 struct dw_pcie *pci = pcie->pci;
612 struct device *dev = pci->dev;
568 613
569 return !!(val & PCI_EXP_LNKSTA_DLLLA); 614 res->aux_clk = devm_clk_get(dev, "aux");
615 if (IS_ERR(res->aux_clk))
616 return PTR_ERR(res->aux_clk);
617
618 res->master_clk = devm_clk_get(dev, "master_bus");
619 if (IS_ERR(res->master_clk))
620 return PTR_ERR(res->master_clk);
621
622 res->slave_clk = devm_clk_get(dev, "slave_bus");
623 if (IS_ERR(res->slave_clk))
624 return PTR_ERR(res->slave_clk);
625
626 res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
627 if (IS_ERR(res->axi_m_reset))
628 return PTR_ERR(res->axi_m_reset);
629
630 res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
631 if (IS_ERR(res->axi_s_reset))
632 return PTR_ERR(res->axi_s_reset);
633
634 res->pipe_reset = devm_reset_control_get(dev, "pipe");
635 if (IS_ERR(res->pipe_reset))
636 return PTR_ERR(res->pipe_reset);
637
638 res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
639 if (IS_ERR(res->axi_m_vmid_reset))
640 return PTR_ERR(res->axi_m_vmid_reset);
641
642 res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
643 if (IS_ERR(res->axi_s_xpu_reset))
644 return PTR_ERR(res->axi_s_xpu_reset);
645
646 res->parf_reset = devm_reset_control_get(dev, "parf");
647 if (IS_ERR(res->parf_reset))
648 return PTR_ERR(res->parf_reset);
649
650 res->phy_reset = devm_reset_control_get(dev, "phy");
651 if (IS_ERR(res->phy_reset))
652 return PTR_ERR(res->phy_reset);
653
654 res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
655 if (IS_ERR(res->axi_m_sticky_reset))
656 return PTR_ERR(res->axi_m_sticky_reset);
657
658 res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
659 if (IS_ERR(res->pipe_sticky_reset))
660 return PTR_ERR(res->pipe_sticky_reset);
661
662 res->pwr_reset = devm_reset_control_get(dev, "pwr");
663 if (IS_ERR(res->pwr_reset))
664 return PTR_ERR(res->pwr_reset);
665
666 res->ahb_reset = devm_reset_control_get(dev, "ahb");
667 if (IS_ERR(res->ahb_reset))
668 return PTR_ERR(res->ahb_reset);
669
670 res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
671 if (IS_ERR(res->phy_ahb_reset))
672 return PTR_ERR(res->phy_ahb_reset);
673
674 return 0;
570} 675}
571 676
572static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) 677static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
573{ 678{
574 struct qcom_pcie_resources_v2 *res = &pcie->res.v2; 679 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
575 680
576 clk_disable_unprepare(res->pipe_clk); 681 reset_control_assert(res->axi_m_reset);
682 reset_control_assert(res->axi_s_reset);
683 reset_control_assert(res->pipe_reset);
684 reset_control_assert(res->pipe_sticky_reset);
685 reset_control_assert(res->phy_reset);
686 reset_control_assert(res->phy_ahb_reset);
687 reset_control_assert(res->axi_m_sticky_reset);
688 reset_control_assert(res->pwr_reset);
689 reset_control_assert(res->ahb_reset);
690 clk_disable_unprepare(res->aux_clk);
691 clk_disable_unprepare(res->master_clk);
577 clk_disable_unprepare(res->slave_clk); 692 clk_disable_unprepare(res->slave_clk);
693}
694
695static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
696{
697 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
698 struct dw_pcie *pci = pcie->pci;
699 struct device *dev = pci->dev;
700 u32 val;
701 int ret;
702
703 ret = reset_control_assert(res->axi_m_reset);
704 if (ret) {
705 dev_err(dev, "cannot assert axi master reset\n");
706 return ret;
707 }
708
709 ret = reset_control_assert(res->axi_s_reset);
710 if (ret) {
711 dev_err(dev, "cannot assert axi slave reset\n");
712 return ret;
713 }
714
715 usleep_range(10000, 12000);
716
717 ret = reset_control_assert(res->pipe_reset);
718 if (ret) {
719 dev_err(dev, "cannot assert pipe reset\n");
720 return ret;
721 }
722
723 ret = reset_control_assert(res->pipe_sticky_reset);
724 if (ret) {
725 dev_err(dev, "cannot assert pipe sticky reset\n");
726 return ret;
727 }
728
729 ret = reset_control_assert(res->phy_reset);
730 if (ret) {
731 dev_err(dev, "cannot assert phy reset\n");
732 return ret;
733 }
734
735 ret = reset_control_assert(res->phy_ahb_reset);
736 if (ret) {
737 dev_err(dev, "cannot assert phy ahb reset\n");
738 return ret;
739 }
740
741 usleep_range(10000, 12000);
742
743 ret = reset_control_assert(res->axi_m_sticky_reset);
744 if (ret) {
745 dev_err(dev, "cannot assert axi master sticky reset\n");
746 return ret;
747 }
748
749 ret = reset_control_assert(res->pwr_reset);
750 if (ret) {
751 dev_err(dev, "cannot assert power reset\n");
752 return ret;
753 }
754
755 ret = reset_control_assert(res->ahb_reset);
756 if (ret) {
757 dev_err(dev, "cannot assert ahb reset\n");
758 return ret;
759 }
760
761 usleep_range(10000, 12000);
762
763 ret = reset_control_deassert(res->phy_ahb_reset);
764 if (ret) {
765 dev_err(dev, "cannot deassert phy ahb reset\n");
766 return ret;
767 }
768
769 ret = reset_control_deassert(res->phy_reset);
770 if (ret) {
771 dev_err(dev, "cannot deassert phy reset\n");
772 goto err_rst_phy;
773 }
774
775 ret = reset_control_deassert(res->pipe_reset);
776 if (ret) {
777 dev_err(dev, "cannot deassert pipe reset\n");
778 goto err_rst_pipe;
779 }
780
781 ret = reset_control_deassert(res->pipe_sticky_reset);
782 if (ret) {
783 dev_err(dev, "cannot deassert pipe sticky reset\n");
784 goto err_rst_pipe_sticky;
785 }
786
787 usleep_range(10000, 12000);
788
789 ret = reset_control_deassert(res->axi_m_reset);
790 if (ret) {
791 dev_err(dev, "cannot deassert axi master reset\n");
792 goto err_rst_axi_m;
793 }
794
795 ret = reset_control_deassert(res->axi_m_sticky_reset);
796 if (ret) {
797 dev_err(dev, "cannot deassert axi master sticky reset\n");
798 goto err_rst_axi_m_sticky;
799 }
800
801 ret = reset_control_deassert(res->axi_s_reset);
802 if (ret) {
803 dev_err(dev, "cannot deassert axi slave reset\n");
804 goto err_rst_axi_s;
805 }
806
807 ret = reset_control_deassert(res->pwr_reset);
808 if (ret) {
809 dev_err(dev, "cannot deassert power reset\n");
810 goto err_rst_pwr;
811 }
812
813 ret = reset_control_deassert(res->ahb_reset);
814 if (ret) {
815 dev_err(dev, "cannot deassert ahb reset\n");
816 goto err_rst_ahb;
817 }
818
819 usleep_range(10000, 12000);
820
821 ret = clk_prepare_enable(res->aux_clk);
822 if (ret) {
 823		dev_err(dev, "cannot prepare/enable aux clock\n");
824 goto err_clk_aux;
825 }
826
827 ret = clk_prepare_enable(res->master_clk);
828 if (ret) {
 829		dev_err(dev, "cannot prepare/enable master clock\n");
830 goto err_clk_axi_m;
831 }
832
833 ret = clk_prepare_enable(res->slave_clk);
834 if (ret) {
 835		dev_err(dev, "cannot prepare/enable slave clock\n");
836 goto err_clk_axi_s;
837 }
838
839 /* enable PCIe clocks and resets */
840 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
 841	val &= ~BIT(0);
842 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
843
844 /* change DBI base address */
845 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
846
847 /* MAC PHY_POWERDOWN MUX DISABLE */
848 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
849 val &= ~BIT(29);
850 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
851
852 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
853 val |= BIT(4);
854 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
855
856 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
857 val |= BIT(31);
858 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
859
860 return 0;
861
862err_clk_axi_s:
578 clk_disable_unprepare(res->master_clk); 863 clk_disable_unprepare(res->master_clk);
579 clk_disable_unprepare(res->cfg_clk); 864err_clk_axi_m:
580 clk_disable_unprepare(res->aux_clk); 865 clk_disable_unprepare(res->aux_clk);
866err_clk_aux:
867 reset_control_assert(res->ahb_reset);
868err_rst_ahb:
869 reset_control_assert(res->pwr_reset);
870err_rst_pwr:
871 reset_control_assert(res->axi_s_reset);
872err_rst_axi_s:
873 reset_control_assert(res->axi_m_sticky_reset);
874err_rst_axi_m_sticky:
875 reset_control_assert(res->axi_m_reset);
876err_rst_axi_m:
877 reset_control_assert(res->pipe_sticky_reset);
878err_rst_pipe_sticky:
879 reset_control_assert(res->pipe_reset);
880err_rst_pipe:
881 reset_control_assert(res->phy_reset);
882err_rst_phy:
883 reset_control_assert(res->phy_ahb_reset);
884 return ret;
885}
886
887static int qcom_pcie_link_up(struct dw_pcie *pci)
888{
889 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
890
891 return !!(val & PCI_EXP_LNKSTA_DLLLA);
581} 892}
582 893
583static void qcom_pcie_host_init(struct pcie_port *pp) 894static void qcom_pcie_host_init(struct pcie_port *pp)
@@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
634 return dw_pcie_read(pci->dbi_base + where, size, val); 945 return dw_pcie_read(pci->dbi_base + where, size, val);
635} 946}
636 947
637static struct dw_pcie_host_ops qcom_pcie_dw_ops = { 948static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
638 .host_init = qcom_pcie_host_init, 949 .host_init = qcom_pcie_host_init,
639 .rd_own_conf = qcom_pcie_rd_own_conf, 950 .rd_own_conf = qcom_pcie_rd_own_conf,
640}; 951};
@@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = {
665 .link_up = qcom_pcie_link_up, 976 .link_up = qcom_pcie_link_up,
666}; 977};
667 978
979static const struct qcom_pcie_ops ops_v3 = {
980 .get_resources = qcom_pcie_get_resources_v3,
981 .init = qcom_pcie_init_v3,
982 .deinit = qcom_pcie_deinit_v3,
983 .ltssm_enable = qcom_pcie_v2_ltssm_enable,
984};
985
668static int qcom_pcie_probe(struct platform_device *pdev) 986static int qcom_pcie_probe(struct platform_device *pdev)
669{ 987{
670 struct device *dev = &pdev->dev; 988 struct device *dev = &pdev->dev;
@@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
727 1045
728 ret = devm_request_irq(dev, pp->msi_irq, 1046 ret = devm_request_irq(dev, pp->msi_irq,
729 qcom_pcie_msi_irq_handler, 1047 qcom_pcie_msi_irq_handler,
730 IRQF_SHARED, "qcom-pcie-msi", pp); 1048 IRQF_SHARED | IRQF_NO_THREAD,
1049 "qcom-pcie-msi", pp);
731 if (ret) { 1050 if (ret) {
732 dev_err(dev, "cannot request msi irq\n"); 1051 dev_err(dev, "cannot request msi irq\n");
733 return ret; 1052 return ret;
@@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = {
754 { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, 1073 { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
755 { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, 1074 { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
756 { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, 1075 { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
1076 { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
757 { } 1077 { }
758}; 1078};
759 1079
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index 8ff36b3dbbdf..80897291e0fb 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp)
186 spear13xx_pcie_enable_interrupts(spear13xx_pcie); 186 spear13xx_pcie_enable_interrupts(spear13xx_pcie);
187} 187}
188 188
189static struct dw_pcie_host_ops spear13xx_pcie_host_ops = { 189static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
190 .host_init = spear13xx_pcie_host_init, 190 .host_init = spear13xx_pcie_host_init,
191}; 191};
192 192