Diffstat (limited to 'drivers/pci/dwc')
-rw-r--r--  drivers/pci/dwc/Kconfig                 |  12
-rw-r--r--  drivers/pci/dwc/pci-dra7xx.c            |  26
-rw-r--r--  drivers/pci/dwc/pci-exynos.c            |  12
-rw-r--r--  drivers/pci/dwc/pci-imx6.c              |  11
-rw-r--r--  drivers/pci/dwc/pci-keystone-dw.c       |  14
-rw-r--r--  drivers/pci/dwc/pci-keystone.c          |  10
-rw-r--r--  drivers/pci/dwc/pci-keystone.h          |   4
-rw-r--r--  drivers/pci/dwc/pci-layerscape.c        | 102
-rw-r--r--  drivers/pci/dwc/pcie-armada8k.c         |  12
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c          |  14
-rw-r--r--  drivers/pci/dwc/pcie-designware-ep.c    |   9
-rw-r--r--  drivers/pci/dwc/pcie-designware-host.c  |  17
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c  |   4
-rw-r--r--  drivers/pci/dwc/pcie-designware.c       |  14
-rw-r--r--  drivers/pci/dwc/pcie-designware.h       |  30
-rw-r--r--  drivers/pci/dwc/pcie-hisi.c             |   5
-rw-r--r--  drivers/pci/dwc/pcie-kirin.c            |   6
-rw-r--r--  drivers/pci/dwc/pcie-qcom.c             | 409
-rw-r--r--  drivers/pci/dwc/pcie-spear13xx.c        |   8
19 files changed, 507 insertions(+), 212 deletions(-)
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index d275aadc47ee..22ec82fcdea2 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -25,7 +25,7 @@ config PCI_DRA7XX
 	  work either as EP or RC. In order to enable host-specific features
 	  PCI_DRA7XX_HOST must be selected and in order to enable device-
 	  specific features PCI_DRA7XX_EP must be selected. This uses
-	  the Designware core.
+	  the DesignWare core.
 
 if PCI_DRA7XX
 
@@ -97,8 +97,8 @@ config PCI_KEYSTONE
 	select PCIE_DW_HOST
 	help
 	  Say Y here if you want to enable PCI controller support on Keystone
-	  SoCs. The PCI controller on Keystone is based on Designware hardware
-	  and therefore the driver re-uses the Designware core functions to
+	  SoCs. The PCI controller on Keystone is based on DesignWare hardware
+	  and therefore the driver re-uses the DesignWare core functions to
 	  implement the driver.
 
 config PCI_LAYERSCAPE
@@ -132,7 +132,7 @@ config PCIE_QCOM
 	select PCIE_DW_HOST
 	help
 	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
-	  PCIe controller uses the Designware core plus Qualcomm-specific
+	  PCIe controller uses the DesignWare core plus Qualcomm-specific
 	  hardware wrappers.
 
 config PCIE_ARMADA_8K
@@ -145,8 +145,8 @@ config PCIE_ARMADA_8K
 	help
 	  Say Y here if you want to enable PCIe controller support on
 	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on
-	  Designware hardware and therefore the driver re-uses the
-	  Designware core functions to implement the driver.
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
 
 config PCIE_ARTPEC6
 	bool "Axis ARTPEC-6 PCIe controller"
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index f2fc5f47064e..34427a6a15af 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -195,7 +195,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
 	dra7xx_pcie_enable_msi_interrupts(dra7xx);
 }
 
-static void dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -206,6 +206,8 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
 	dw_pcie_wait_for_link(pci);
 	dw_pcie_msi_init(pp);
 	dra7xx_pcie_enable_interrupts(dra7xx);
+
+	return 0;
 }
 
 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
@@ -238,7 +240,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
 		return -ENODEV;
 	}
 
-	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 						   &intx_domain_ops, pp);
 	if (!dra7xx->irq_domain) {
 		dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -275,7 +277,6 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-
 static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 {
 	struct dra7xx_pcie *dra7xx = arg;
@@ -335,10 +336,23 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+	u32 reg;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+	dw_pcie_writel_dbi2(pci, reg, 0x0);
+	dw_pcie_writel_dbi(pci, reg, 0x0);
+}
+
 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
 
 	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
 }
@@ -435,7 +449,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 	pp->irq = platform_get_irq(pdev, 1);
 	if (pp->irq < 0) {
 		dev_err(dev, "missing IRQ resource\n");
-		return -EINVAL;
+		return pp->irq;
 	}
 
 	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
@@ -616,8 +630,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		dev_err(dev, "missing IRQ resource\n");
-		return -EINVAL;
+		dev_err(dev, "missing IRQ resource: %d\n", irq);
+		return irq;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index c78c06552590..5596fdedbb94 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -581,13 +581,15 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
 	return 0;
 }
 
-static void exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
 
 	exynos_pcie_establish_link(ep);
 	exynos_pcie_enable_interrupts(ep);
+
+	return 0;
 }
 
 static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
@@ -605,9 +607,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
 	int ret;
 
 	pp->irq = platform_get_irq(pdev, 1);
-	if (!pp->irq) {
+	if (pp->irq < 0) {
 		dev_err(dev, "failed to get irq\n");
-		return -ENODEV;
+		return pp->irq;
 	}
 	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
 			       IRQF_SHARED, "exynos-pcie", ep);
@@ -618,9 +620,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		pp->msi_irq = platform_get_irq(pdev, 0);
-		if (!pp->msi_irq) {
+		if (pp->msi_irq < 0) {
 			dev_err(dev, "failed to get msi irq\n");
-			return -ENODEV;
+			return pp->msi_irq;
 		}
 
 		ret = devm_request_irq(dev, pp->msi_irq,
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index bf5c3616e344..b73483534a5b 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -636,7 +636,7 @@ err_reset_phy:
 	return ret;
 }
 
-static void imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -649,6 +649,8 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		dw_pcie_msi_init(pp);
+
+	return 0;
 }
 
 static int imx6_pcie_link_up(struct dw_pcie *pci)
@@ -778,14 +780,15 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 		}
 		break;
 	case IMX7D:
-		imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
+		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
 								  "pciephy");
 		if (IS_ERR(imx6_pcie->pciephy_reset)) {
 			dev_err(dev, "Failed to get PCIEPHY reset control\n");
 			return PTR_ERR(imx6_pcie->pciephy_reset);
 		}
 
-		imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+									 "apps");
 		if (IS_ERR(imx6_pcie->apps_reset)) {
 			dev_err(dev, "Failed to get PCIE APPS reset control\n");
 			return PTR_ERR(imx6_pcie->apps_reset);
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 8bc626e640c8..2fb20b887d2a 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -1,5 +1,5 @@
 /*
- * Designware application register space functions for Keystone PCI controller
+ * DesignWare application register space functions for Keystone PCI controller
  *
  * Copyright (C) 2013-2014 Texas Instruments., Ltd.
  * http://www.ti.com
@@ -168,16 +168,12 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 
 static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
 {
-	struct keystone_pcie *ks_pcie;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
-	struct dw_pcie *pci;
 	u32 offset;
 
 	msi = irq_data_get_msi_desc(d);
 	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
-	pci = to_dw_pcie_from_pp(pp);
-	ks_pcie = to_keystone_pcie(pci);
 	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
 	/* Mask the end point if PVM implemented */
@@ -191,16 +187,12 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
 
 static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
 {
-	struct keystone_pcie *ks_pcie;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
-	struct dw_pcie *pci;
 	u32 offset;
 
 	msi = irq_data_get_msi_desc(d);
 	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
-	pci = to_dw_pcie_from_pp(pp);
-	ks_pcie = to_keystone_pcie(pci);
 	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
 	/* Mask the end point if PVM implemented */
@@ -259,7 +251,7 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
 {
 	int i;
 
-	for (i = 0; i < MAX_LEGACY_IRQS; i++)
+	for (i = 0; i < PCI_NUM_INTX; i++)
 		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
 }
 
@@ -565,7 +557,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
 	/* Create legacy IRQ domain */
 	ks_pcie->legacy_irq_domain =
 			irq_domain_add_linear(ks_pcie->legacy_intc_np,
-					      MAX_LEGACY_IRQS,
+					      PCI_NUM_INTX,
 					      &ks_dw_pcie_legacy_irq_domain_ops,
 					      NULL);
 	if (!ks_pcie->legacy_irq_domain) {
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index 4783cec1f78d..5bee3af47588 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -32,10 +32,6 @@
 
 #define DRIVER_NAME	"keystone-pcie"
 
-/* driver specific constants */
-#define MAX_MSI_HOST_IRQS	8
-#define MAX_LEGACY_HOST_IRQS	4
-
 /* DEV_STAT_CTRL */
 #define PCIE_CAP_BASE	0x70
 
@@ -173,7 +169,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
 
 	if (legacy) {
 		np_temp = &ks_pcie->legacy_intc_np;
-		max_host_irqs = MAX_LEGACY_HOST_IRQS;
+		max_host_irqs = PCI_NUM_INTX;
 		host_irqs = &ks_pcie->legacy_host_irqs[0];
 	} else {
 		np_temp = &ks_pcie->msi_intc_np;
@@ -261,7 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
 	return 0;
 }
 
-static void __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -289,6 +285,8 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
 	 */
 	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
 			"Asynchronous external abort");
+
+	return 0;
 }
 
 static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
diff --git a/drivers/pci/dwc/pci-keystone.h b/drivers/pci/dwc/pci-keystone.h
index 74c5825882df..30b7bc2ac380 100644
--- a/drivers/pci/dwc/pci-keystone.h
+++ b/drivers/pci/dwc/pci-keystone.h
@@ -12,9 +12,7 @@
  * published by the Free Software Foundation.
  */
 
-#define MAX_LEGACY_IRQS		4
 #define MAX_MSI_HOST_IRQS	8
-#define MAX_LEGACY_HOST_IRQS	4
 
 struct keystone_pcie {
 	struct dw_pcie *pci;
@@ -22,7 +20,7 @@ struct keystone_pcie {
 	/* PCI Device ID */
 	u32 device_id;
 	int num_legacy_host_irqs;
-	int legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+	int legacy_host_irqs[PCI_NUM_INTX];
 	struct device_node *legacy_intc_np;
 
 	int num_msi_host_irqs;
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index fd861289ad8b..87fa486bee2c 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -33,7 +33,8 @@
 
 /* PEX Internal Configuration Registers */
 #define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
-#define PCIE_DBI_RO_WR_EN	0x8bc /* DBI Read-Only Write Enable Register */
+
+#define PCIE_IATU_NUM		6
 
 struct ls_pcie_drvdata {
 	u32 lut_offset;
@@ -72,14 +73,6 @@ static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
 	iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
 }
 
-/* Fix class value */
-static void ls_pcie_fix_class(struct ls_pcie *pcie)
-{
-	struct dw_pcie *pci = pcie->pci;
-
-	iowrite16(PCI_CLASS_BRIDGE_PCI, pci->dbi_base + PCI_CLASS_DEVICE);
-}
-
 /* Drop MSG TLP except for Vendor MSG */
 static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
 {
@@ -91,6 +84,14 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
 	iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
 }
 
+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
+{
+	int i;
+
+	for (i = 0; i < PCIE_IATU_NUM; i++)
+		dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
+}
+
 static int ls1021_pcie_link_up(struct dw_pcie *pci)
 {
 	u32 state;
@@ -108,33 +109,6 @@ static int ls1021_pcie_link_up(struct dw_pcie *pci)
 	return 1;
 }
 
-static void ls1021_pcie_host_init(struct pcie_port *pp)
-{
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	struct ls_pcie *pcie = to_ls_pcie(pci);
-	struct device *dev = pci->dev;
-	u32 index[2];
-
-	pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
-						     "fsl,pcie-scfg");
-	if (IS_ERR(pcie->scfg)) {
-		dev_err(dev, "No syscfg phandle specified\n");
-		pcie->scfg = NULL;
-		return;
-	}
-
-	if (of_property_read_u32_array(dev->of_node,
-				       "fsl,pcie-scfg", index, 2)) {
-		pcie->scfg = NULL;
-		return;
-	}
-	pcie->index = index[1];
-
-	dw_pcie_setup_rc(pp);
-
-	ls_pcie_drop_msg_tlp(pcie);
-}
-
 static int ls_pcie_link_up(struct dw_pcie *pci)
 {
 	struct ls_pcie *pcie = to_ls_pcie(pci);
@@ -150,16 +124,54 @@ static int ls_pcie_link_up(struct dw_pcie *pci)
 	return 1;
 }
 
-static void ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct ls_pcie *pcie = to_ls_pcie(pci);
 
-	iowrite32(1, pci->dbi_base + PCIE_DBI_RO_WR_EN);
-	ls_pcie_fix_class(pcie);
+	/*
+	 * Disable outbound windows configured by the bootloader to avoid
+	 * one transaction hitting multiple outbound windows.
+	 * dw_pcie_setup_rc() will reconfigure the outbound windows.
+	 */
+	ls_pcie_disable_outbound_atus(pcie);
+
+	dw_pcie_dbi_ro_wr_en(pci);
 	ls_pcie_clear_multifunction(pcie);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
 	ls_pcie_drop_msg_tlp(pcie);
-	iowrite32(0, pci->dbi_base + PCIE_DBI_RO_WR_EN);
+
+	dw_pcie_setup_rc(pp);
+
+	return 0;
+}
+
+static int ls1021_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 index[2];
+	int ret;
+
+	pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
+						     "fsl,pcie-scfg");
+	if (IS_ERR(pcie->scfg)) {
+		ret = PTR_ERR(pcie->scfg);
+		dev_err(dev, "No syscfg phandle specified\n");
+		pcie->scfg = NULL;
+		return ret;
+	}
+
+	if (of_property_read_u32_array(dev->of_node,
+				       "fsl,pcie-scfg", index, 2)) {
+		pcie->scfg = NULL;
+		return -EINVAL;
+	}
+	pcie->index = index[1];
+
+	return ls_pcie_host_init(pp);
 }
 
 static int ls_pcie_msi_host_init(struct pcie_port *pp,
@@ -232,12 +244,22 @@ static struct ls_pcie_drvdata ls2080_drvdata = {
 	.dw_pcie_ops = &dw_ls_pcie_ops,
 };
 
+static struct ls_pcie_drvdata ls2088_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 0,
+	.lut_dbg = 0x407fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
 static const struct of_device_id ls_pcie_of_match[] = {
 	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
 	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
 	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
 	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
 	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
+	{ .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
 	{ },
 };
 
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index ea8f34af6a85..370d057c0046 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -134,13 +134,15 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
 		dev_err(pci->dev, "Link not up after reconfiguration\n");
 }
 
-static void armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
 
 	dw_pcie_setup_rc(pp);
 	armada8k_pcie_establish_link(pcie);
+
+	return 0;
 }
 
 static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
@@ -176,9 +178,9 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
 	pp->ops = &armada8k_pcie_host_ops;
 
 	pp->irq = platform_get_irq(pdev, 0);
-	if (!pp->irq) {
+	if (pp->irq < 0) {
 		dev_err(dev, "failed to get irq for port\n");
-		return -ENODEV;
+		return pp->irq;
 	}
 
 	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
@@ -226,7 +228,9 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
 	if (IS_ERR(pcie->clk))
 		return PTR_ERR(pcie->clk);
 
-	clk_prepare_enable(pcie->clk);
+	ret = clk_prepare_enable(pcie->clk);
+	if (ret)
+		return ret;
 
 	/* Get the dw-pcie unit configuration/control registers base. */
 	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 01c6f7823672..6653619db6a1 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -141,12 +141,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
 	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
 	usleep_range(100, 200);
 
-	/*
-	 * Enable writing to config regs. This is required as the Synopsys
-	 * driver changes the class code. That register needs DBI write enable.
-	 */
-	dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
-
 	/* setup root complex */
 	dw_pcie_setup_rc(pp);
 
@@ -175,13 +169,15 @@ static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
 	dw_pcie_msi_init(pp);
 }
 
-static void artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
 
 	artpec6_pcie_establish_link(artpec6_pcie);
 	artpec6_pcie_enable_interrupts(artpec6_pcie);
+
+	return 0;
 }
 
 static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
@@ -207,9 +203,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
-		if (pp->msi_irq <= 0) {
+		if (pp->msi_irq < 0) {
 			dev_err(dev, "failed to get MSI irq\n");
-			return -ENODEV;
+			return pp->msi_irq;
 		}
 
 		ret = devm_request_irq(dev, pp->msi_irq,
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index 398406393f37..d53d5f168363 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -1,5 +1,5 @@
 /**
- * Synopsys Designware PCIe Endpoint controller driver
+ * Synopsys DesignWare PCIe Endpoint controller driver
  *
  * Copyright (C) 2017 Texas Instruments
  * Author: Kishon Vijay Abraham I <kishon@ti.com>
@@ -283,7 +283,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 {
 	int ret;
 	void *addr;
-	enum pci_barno bar;
 	struct pci_epc *epc;
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct device *dev = pci->dev;
@@ -312,9 +311,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 		return -ENOMEM;
 	ep->outbound_addr = addr;
 
-	for (bar = BAR_0; bar <= BAR_5; bar++)
-		dw_pcie_ep_reset_bar(pci, bar);
-
 	if (ep->ops->ep_init)
 		ep->ops->ep_init(ep);
 
@@ -328,7 +324,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 	if (ret < 0)
 		epc->max_functions = 1;
 
-	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size);
+	ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+				 ep->page_size);
 	if (ret < 0) {
 		dev_err(dev, "Failed to initialize address space\n");
 		return ret;
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index d29c020da082..81e2157a7cfb 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -1,5 +1,5 @@
 /*
- * Synopsys Designware PCIe host controller driver
+ * Synopsys DesignWare PCIe host controller driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  * http://www.samsung.com
@@ -71,9 +71,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 		while ((pos = find_next_bit((unsigned long *) &val, 32,
 					    pos)) != 32) {
 			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+			generic_handle_irq(irq);
 			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
 					    4, 1 << pos);
-			generic_handle_irq(irq);
 			pos++;
 		}
 	}
@@ -401,8 +401,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		}
 	}
 
-	if (pp->ops->host_init)
-		pp->ops->host_init(pp);
+	if (pp->ops->host_init) {
+		ret = pp->ops->host_init(pp);
+		if (ret)
+			goto error;
+	}
 
 	pp->root_bus_nr = pp->busn->start;
 
@@ -594,10 +597,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
 
 	/* setup interrupt pins */
+	dw_pcie_dbi_ro_wr_en(pci);
 	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
 	val &= 0xffff00ff;
 	val |= 0x00000100;
 	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
 
 	/* setup bus numbers */
 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
@@ -634,8 +639,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 
 	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
 
+	/* Enable write permission for the DBI read-only register */
+	dw_pcie_dbi_ro_wr_en(pci);
 	/* program correct class for RC */
 	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+	/* Better disable write permission right after the update */
+	dw_pcie_dbi_ro_wr_dis(pci);
 
 	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
 	val |= PORT_LOGIC_SPEED_CHANGE;
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 091b4e7ad059..168e2380f493 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -35,7 +35,7 @@ static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
 	return dw_handle_msi_irq(pp);
 }
 
-static void dw_plat_pcie_host_init(struct pcie_port *pp)
+static int dw_plat_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
@@ -44,6 +44,8 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		dw_pcie_msi_init(pp);
+
+	return 0;
 }
 
 static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 0e03af279259..88abdddee2ad 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -1,5 +1,5 @@
 /*
- * Synopsys Designware PCIe host controller driver
+ * Synopsys DesignWare PCIe host controller driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  * http://www.samsung.com
@@ -107,8 +107,9 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
 	dw_pcie_writel_dbi(pci, offset + reg, val);
 }
 
-void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
-				      u64 cpu_addr, u64 pci_addr, u32 size)
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
+					     int type, u64 cpu_addr,
+					     u64 pci_addr, u32 size)
 {
 	u32 retries, val;
 
@@ -177,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
 	 */
 	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
 		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
-		if (val == PCIE_ATU_ENABLE)
+		if (val & PCIE_ATU_ENABLE)
 			return;
 
 		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
@@ -200,8 +201,9 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
 	dw_pcie_writel_dbi(pci, offset + reg, val);
 }
 
-int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
-				    u64 cpu_addr, enum dw_pcie_as_type as_type)
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
+					   int bar, u64 cpu_addr,
+					   enum dw_pcie_as_type as_type)
 {
 	int type;
 	u32 retries, val;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index b4d2a89f8e58..e5d9d77b778e 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -1,5 +1,5 @@
 /*
- * Synopsys Designware PCIe host controller driver
+ * Synopsys DesignWare PCIe host controller driver
  *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
@@ -76,6 +76,9 @@
 #define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
 #define PCIE_ATU_UPPER_TARGET		0x91C
 
+#define PCIE_MISC_CONTROL_1_OFF		0x8BC
+#define PCIE_DBI_RO_WR_EN		(0x1 << 0)
+
 /*
  * iATU Unroll-specific register definitions
  * From 4.80 core version the address translation will be made by unroll
@@ -134,7 +137,7 @@ struct dw_pcie_host_ops {
 			unsigned int devfn, int where, int size, u32 *val);
 	int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
 			unsigned int devfn, int where, int size, u32 val);
-	void (*host_init)(struct pcie_port *pp);
+	int (*host_init)(struct pcie_port *pp);
 	void (*msi_set_irq)(struct pcie_port *pp, int irq);
 	void (*msi_clear_irq)(struct pcie_port *pp, int irq);
 	phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
@@ -186,6 +189,7 @@ struct dw_pcie_ep {
 	struct dw_pcie_ep_ops *ops;
 	phys_addr_t phys_base;
 	size_t addr_size;
+	size_t page_size;
 	u8 bar_to_atu[6];
 	phys_addr_t *outbound_addr;
 	unsigned long ib_window_map;
@@ -279,6 +283,28 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
 	return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
 }
 
+static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 val;
+
+	reg = PCIE_MISC_CONTROL_1_OFF;
+	val = dw_pcie_readl_dbi(pci, reg);
+	val |= PCIE_DBI_RO_WR_EN;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 val;
+
+	reg = PCIE_MISC_CONTROL_1_OFF;
+	val = dw_pcie_readl_dbi(pci, reg);
+	val &= ~PCIE_DBI_RO_WR_EN;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
+
 #ifdef CONFIG_PCIE_DW_HOST
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
 void dw_pcie_msi_init(struct pcie_port *pp);
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index e51acee0ddf3..a20179169e06 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -223,7 +223,7 @@ static int hisi_pcie_link_up(struct dw_pcie *pci)
 	return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
 }
 
-static struct dw_pcie_host_ops hisi_pcie_host_ops = {
+static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
 	.rd_own_conf = hisi_pcie_cfg_read,
 	.wr_own_conf = hisi_pcie_cfg_write,
 };
@@ -268,7 +268,6 @@ static int hisi_pcie_probe(struct platform_device *pdev)
 	struct dw_pcie *pci;
 	struct hisi_pcie *hisi_pcie;
 	struct resource *reg;
-	struct device_driver *driver;
 	int ret;
 
 	hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
@@ -282,8 +281,6 @@ static int hisi_pcie_probe(struct platform_device *pdev)
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
 
-	driver = dev->driver;
-
 	hisi_pcie->pci = pci;
 
 	hisi_pcie->soc_ops = of_device_get_match_data(dev);
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index 33fddb9f6739..dc3033cf3c19 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -430,9 +430,11 @@ static int kirin_pcie_establish_link(struct pcie_port *pp)
 	return 0;
 }
 
-static void kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct pcie_port *pp)
 {
 	kirin_pcie_establish_link(pp);
+
+	return 0;
 }
 
 static struct dw_pcie_ops kirin_dw_pcie_ops = {
@@ -441,7 +443,7 @@ static struct dw_pcie_ops kirin_dw_pcie_ops = {
 	.link_up = kirin_pcie_link_up,
 };
 
-static struct dw_pcie_host_ops kirin_pcie_host_ops = {
+static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
 	.rd_own_conf = kirin_pcie_rd_own_conf,
 	.wr_own_conf = kirin_pcie_wr_own_conf,
 	.host_init = kirin_pcie_host_init,
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 68c5f2ab5bc8..ce7ba5b7552a 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c | |||
@@ -37,6 +37,20 @@ | |||
37 | #include "pcie-designware.h" | 37 | #include "pcie-designware.h" |
38 | 38 | ||
39 | #define PCIE20_PARF_SYS_CTRL 0x00 | 39 | #define PCIE20_PARF_SYS_CTRL 0x00 |
40 | #define MST_WAKEUP_EN BIT(13) | ||
41 | #define SLV_WAKEUP_EN BIT(12) | ||
42 | #define MSTR_ACLK_CGC_DIS BIT(10) | ||
43 | #define SLV_ACLK_CGC_DIS BIT(9) | ||
44 | #define CORE_CLK_CGC_DIS BIT(6) | ||
45 | #define AUX_PWR_DET BIT(4) | ||
46 | #define L23_CLK_RMV_DIS BIT(2) | ||
47 | #define L1_CLK_RMV_DIS BIT(1) | ||
48 | |||
49 | #define PCIE20_COMMAND_STATUS 0x04 | ||
50 | #define CMD_BME_VAL 0x4 | ||
51 | #define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 | ||
52 | #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 | ||
53 | |||
40 | #define PCIE20_PARF_PHY_CTRL 0x40 | 54 | #define PCIE20_PARF_PHY_CTRL 0x40 |
41 | #define PCIE20_PARF_PHY_REFCLK 0x4C | 55 | #define PCIE20_PARF_PHY_REFCLK 0x4C |
42 | #define PCIE20_PARF_DBI_BASE_ADDR 0x168 | 56 | #define PCIE20_PARF_DBI_BASE_ADDR 0x168 |
@@ -58,10 +72,22 @@ | |||
58 | #define CFG_BRIDGE_SB_INIT BIT(0) | 72 | #define CFG_BRIDGE_SB_INIT BIT(0) |
59 | 73 | ||
60 | #define PCIE20_CAP 0x70 | 74 | #define PCIE20_CAP 0x70 |
75 | #define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) | ||
76 | #define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) | ||
77 | #define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) | ||
78 | #define PCIE_CAP_LINK1_VAL 0x2FD7F | ||
79 | |||
80 | #define PCIE20_PARF_Q2A_FLUSH 0x1AC | ||
81 | |||
82 | #define PCIE20_MISC_CONTROL_1_REG 0x8BC | ||
83 | #define DBI_RO_WR_EN 1 | ||
61 | 84 | ||
62 | #define PERST_DELAY_US 1000 | 85 | #define PERST_DELAY_US 1000 |
63 | 86 | ||
64 | struct qcom_pcie_resources_v0 { | 87 | #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 |
88 | #define SLV_ADDR_SPACE_SZ 0x10000000 | ||
89 | |||
90 | struct qcom_pcie_resources_2_1_0 { | ||
65 | struct clk *iface_clk; | 91 | struct clk *iface_clk; |
66 | struct clk *core_clk; | 92 | struct clk *core_clk; |
67 | struct clk *phy_clk; | 93 | struct clk *phy_clk; |
@@ -75,7 +101,7 @@ struct qcom_pcie_resources_v0 { | |||
75 | struct regulator *vdda_refclk; | 101 | struct regulator *vdda_refclk; |
76 | }; | 102 | }; |
77 | 103 | ||
78 | struct qcom_pcie_resources_v1 { | 104 | struct qcom_pcie_resources_1_0_0 { |
79 | struct clk *iface; | 105 | struct clk *iface; |
80 | struct clk *aux; | 106 | struct clk *aux; |
81 | struct clk *master_bus; | 107 | struct clk *master_bus; |
@@ -84,7 +110,7 @@ struct qcom_pcie_resources_v1 { | |||
84 | struct regulator *vdda; | 110 | struct regulator *vdda; |
85 | }; | 111 | }; |
86 | 112 | ||
87 | struct qcom_pcie_resources_v2 { | 113 | struct qcom_pcie_resources_2_3_2 { |
88 | struct clk *aux_clk; | 114 | struct clk *aux_clk; |
89 | struct clk *master_clk; | 115 | struct clk *master_clk; |
90 | struct clk *slave_clk; | 116 | struct clk *slave_clk; |
@@ -92,7 +118,7 @@ struct qcom_pcie_resources_v2 { | |||
92 | struct clk *pipe_clk; | 118 | struct clk *pipe_clk; |
93 | }; | 119 | }; |
94 | 120 | ||
95 | struct qcom_pcie_resources_v3 { | 121 | struct qcom_pcie_resources_2_4_0 { |
96 | struct clk *aux_clk; | 122 | struct clk *aux_clk; |
97 | struct clk *master_clk; | 123 | struct clk *master_clk; |
98 | struct clk *slave_clk; | 124 | struct clk *slave_clk; |
@@ -110,11 +136,21 @@ struct qcom_pcie_resources_v3 { | |||
110 | struct reset_control *phy_ahb_reset; | 136 | struct reset_control *phy_ahb_reset; |
111 | }; | 137 | }; |
112 | 138 | ||
139 | struct qcom_pcie_resources_2_3_3 { | ||
140 | struct clk *iface; | ||
141 | struct clk *axi_m_clk; | ||
142 | struct clk *axi_s_clk; | ||
143 | struct clk *ahb_clk; | ||
144 | struct clk *aux_clk; | ||
145 | struct reset_control *rst[7]; | ||
146 | }; | ||
147 | |||
113 | union qcom_pcie_resources { | 148 | union qcom_pcie_resources { |
114 | struct qcom_pcie_resources_v0 v0; | 149 | struct qcom_pcie_resources_1_0_0 v1_0_0; |
115 | struct qcom_pcie_resources_v1 v1; | 150 | struct qcom_pcie_resources_2_1_0 v2_1_0; |
116 | struct qcom_pcie_resources_v2 v2; | 151 | struct qcom_pcie_resources_2_3_2 v2_3_2; |
117 | struct qcom_pcie_resources_v3 v3; | 152 | struct qcom_pcie_resources_2_3_3 v2_3_3; |
153 | struct qcom_pcie_resources_2_4_0 v2_4_0; | ||
118 | }; | 154 | }; |
119 | 155 | ||
120 | struct qcom_pcie; | 156 | struct qcom_pcie; |
@@ -124,6 +160,7 @@ struct qcom_pcie_ops { | |||
124 | int (*init)(struct qcom_pcie *pcie); | 160 | int (*init)(struct qcom_pcie *pcie); |
125 | int (*post_init)(struct qcom_pcie *pcie); | 161 | int (*post_init)(struct qcom_pcie *pcie); |
126 | void (*deinit)(struct qcom_pcie *pcie); | 162 | void (*deinit)(struct qcom_pcie *pcie); |
163 | void (*post_deinit)(struct qcom_pcie *pcie); | ||
127 | void (*ltssm_enable)(struct qcom_pcie *pcie); | 164 | void (*ltssm_enable)(struct qcom_pcie *pcie); |
128 | }; | 165 | }; |
129 | 166 | ||
@@ -141,13 +178,13 @@ struct qcom_pcie { | |||
141 | 178 | ||
142 | static void qcom_ep_reset_assert(struct qcom_pcie *pcie) | 179 | static void qcom_ep_reset_assert(struct qcom_pcie *pcie) |
143 | { | 180 | { |
144 | gpiod_set_value(pcie->reset, 1); | 181 | gpiod_set_value_cansleep(pcie->reset, 1); |
145 | usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); | 182 | usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); |
146 | } | 183 | } |
147 | 184 | ||
148 | static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) | 185 | static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) |
149 | { | 186 | { |
150 | gpiod_set_value(pcie->reset, 0); | 187 | gpiod_set_value_cansleep(pcie->reset, 0); |
151 | usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); | 188 | usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); |
152 | } | 189 | } |
153 | 190 | ||
@@ -172,7 +209,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie) | |||
172 | return dw_pcie_wait_for_link(pci); | 209 | return dw_pcie_wait_for_link(pci); |
173 | } | 210 | } |
174 | 211 | ||
175 | static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) | 212 | static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) |
176 | { | 213 | { |
177 | u32 val; | 214 | u32 val; |
178 | 215 | ||
@@ -182,9 +219,9 @@ static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) | |||
182 | writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); | 219 | writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); |
183 | } | 220 | } |
184 | 221 | ||
185 | static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) | 222 | static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) |
186 | { | 223 | { |
187 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; | 224 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; |
188 | struct dw_pcie *pci = pcie->pci; | 225 | struct dw_pcie *pci = pcie->pci; |
189 | struct device *dev = pci->dev; | 226 | struct device *dev = pci->dev; |
190 | 227 | ||
@@ -212,29 +249,29 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) | |||
212 | if (IS_ERR(res->phy_clk)) | 249 | if (IS_ERR(res->phy_clk)) |
213 | return PTR_ERR(res->phy_clk); | 250 | return PTR_ERR(res->phy_clk); |
214 | 251 | ||
215 | res->pci_reset = devm_reset_control_get(dev, "pci"); | 252 | res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); |
216 | if (IS_ERR(res->pci_reset)) | 253 | if (IS_ERR(res->pci_reset)) |
217 | return PTR_ERR(res->pci_reset); | 254 | return PTR_ERR(res->pci_reset); |
218 | 255 | ||
219 | res->axi_reset = devm_reset_control_get(dev, "axi"); | 256 | res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); |
220 | if (IS_ERR(res->axi_reset)) | 257 | if (IS_ERR(res->axi_reset)) |
221 | return PTR_ERR(res->axi_reset); | 258 | return PTR_ERR(res->axi_reset); |
222 | 259 | ||
223 | res->ahb_reset = devm_reset_control_get(dev, "ahb"); | 260 | res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); |
224 | if (IS_ERR(res->ahb_reset)) | 261 | if (IS_ERR(res->ahb_reset)) |
225 | return PTR_ERR(res->ahb_reset); | 262 | return PTR_ERR(res->ahb_reset); |
226 | 263 | ||
227 | res->por_reset = devm_reset_control_get(dev, "por"); | 264 | res->por_reset = devm_reset_control_get_exclusive(dev, "por"); |
228 | if (IS_ERR(res->por_reset)) | 265 | if (IS_ERR(res->por_reset)) |
229 | return PTR_ERR(res->por_reset); | 266 | return PTR_ERR(res->por_reset); |
230 | 267 | ||
231 | res->phy_reset = devm_reset_control_get(dev, "phy"); | 268 | res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); |
232 | return PTR_ERR_OR_ZERO(res->phy_reset); | 269 | return PTR_ERR_OR_ZERO(res->phy_reset); |
233 | } | 270 | } |
234 | 271 | ||
235 | static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) | 272 | static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) |
236 | { | 273 | { |
237 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; | 274 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; |
238 | 275 | ||
239 | reset_control_assert(res->pci_reset); | 276 | reset_control_assert(res->pci_reset); |
240 | reset_control_assert(res->axi_reset); | 277 | reset_control_assert(res->axi_reset); |
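
Replacing devm_reset_control_get() with devm_reset_control_get_exclusive() follows the reset framework's move away from the ambiguous unsuffixed getters: callers now state whether they need exclusive or shared control of a line, and a driver that asserts and de-asserts the resets itself, as this one does, wants the exclusive variant. A minimal sketch under that assumption (the "core" name and the helper are illustrative):

#include <linux/err.h>
#include <linux/reset.h>

/* Illustrative probe fragment: look up a dedicated reset line and cycle it. */
static int example_reset_cycle(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, "core");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	return reset_control_deassert(rst);
}
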
@@ -249,9 +286,9 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) | |||
249 | regulator_disable(res->vdda_refclk); | 286 | regulator_disable(res->vdda_refclk); |
250 | } | 287 | } |
251 | 288 | ||
252 | static int qcom_pcie_init_v0(struct qcom_pcie *pcie) | 289 | static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) |
253 | { | 290 | { |
254 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; | 291 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; |
255 | struct dw_pcie *pci = pcie->pci; | 292 | struct dw_pcie *pci = pcie->pci; |
256 | struct device *dev = pci->dev; | 293 | struct device *dev = pci->dev; |
257 | u32 val; | 294 | u32 val; |
@@ -367,9 +404,9 @@ err_refclk: | |||
367 | return ret; | 404 | return ret; |
368 | } | 405 | } |
369 | 406 | ||
370 | static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) | 407 | static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) |
371 | { | 408 | { |
372 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; | 409 | struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; |
373 | struct dw_pcie *pci = pcie->pci; | 410 | struct dw_pcie *pci = pcie->pci; |
374 | struct device *dev = pci->dev; | 411 | struct device *dev = pci->dev; |
375 | 412 | ||
@@ -393,13 +430,13 @@ static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) | |||
393 | if (IS_ERR(res->slave_bus)) | 430 | if (IS_ERR(res->slave_bus)) |
394 | return PTR_ERR(res->slave_bus); | 431 | return PTR_ERR(res->slave_bus); |
395 | 432 | ||
396 | res->core = devm_reset_control_get(dev, "core"); | 433 | res->core = devm_reset_control_get_exclusive(dev, "core"); |
397 | return PTR_ERR_OR_ZERO(res->core); | 434 | return PTR_ERR_OR_ZERO(res->core); |
398 | } | 435 | } |
399 | 436 | ||
400 | static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) | 437 | static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) |
401 | { | 438 | { |
402 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; | 439 | struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; |
403 | 440 | ||
404 | reset_control_assert(res->core); | 441 | reset_control_assert(res->core); |
405 | clk_disable_unprepare(res->slave_bus); | 442 | clk_disable_unprepare(res->slave_bus); |
@@ -409,9 +446,9 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) | |||
409 | regulator_disable(res->vdda); | 446 | regulator_disable(res->vdda); |
410 | } | 447 | } |
411 | 448 | ||
412 | static int qcom_pcie_init_v1(struct qcom_pcie *pcie) | 449 | static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) |
413 | { | 450 | { |
414 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; | 451 | struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; |
415 | struct dw_pcie *pci = pcie->pci; | 452 | struct dw_pcie *pci = pcie->pci; |
416 | struct device *dev = pci->dev; | 453 | struct device *dev = pci->dev; |
417 | int ret; | 454 | int ret; |
@@ -477,7 +514,7 @@ err_res: | |||
477 | return ret; | 514 | return ret; |
478 | } | 515 | } |
479 | 516 | ||
480 | static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) | 517 | static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) |
481 | { | 518 | { |
482 | u32 val; | 519 | u32 val; |
483 | 520 | ||
@@ -487,9 +524,9 @@ static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) | |||
487 | writel(val, pcie->parf + PCIE20_PARF_LTSSM); | 524 | writel(val, pcie->parf + PCIE20_PARF_LTSSM); |
488 | } | 525 | } |
489 | 526 | ||
490 | static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) | 527 | static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) |
491 | { | 528 | { |
492 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | 529 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; |
493 | struct dw_pcie *pci = pcie->pci; | 530 | struct dw_pcie *pci = pcie->pci; |
494 | struct device *dev = pci->dev; | 531 | struct device *dev = pci->dev; |
495 | 532 | ||
@@ -513,20 +550,26 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) | |||
513 | return PTR_ERR_OR_ZERO(res->pipe_clk); | 550 | return PTR_ERR_OR_ZERO(res->pipe_clk); |
514 | } | 551 | } |
515 | 552 | ||
516 | static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) | 553 | static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) |
517 | { | 554 | { |
518 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | 555 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; |
519 | 556 | ||
520 | clk_disable_unprepare(res->pipe_clk); | ||
521 | clk_disable_unprepare(res->slave_clk); | 557 | clk_disable_unprepare(res->slave_clk); |
522 | clk_disable_unprepare(res->master_clk); | 558 | clk_disable_unprepare(res->master_clk); |
523 | clk_disable_unprepare(res->cfg_clk); | 559 | clk_disable_unprepare(res->cfg_clk); |
524 | clk_disable_unprepare(res->aux_clk); | 560 | clk_disable_unprepare(res->aux_clk); |
525 | } | 561 | } |
526 | 562 | ||
527 | static int qcom_pcie_init_v2(struct qcom_pcie *pcie) | 563 | static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) |
528 | { | 564 | { |
529 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | 565 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; |
566 | |||
567 | clk_disable_unprepare(res->pipe_clk); | ||
568 | } | ||
569 | |||
570 | static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) | ||
571 | { | ||
572 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
530 | struct dw_pcie *pci = pcie->pci; | 573 | struct dw_pcie *pci = pcie->pci; |
531 | struct device *dev = pci->dev; | 574 | struct device *dev = pci->dev; |
532 | u32 val; | 575 | u32 val; |
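
Splitting the pipe clock out of deinit() into the new post_deinit() hook mirrors the existing post_init(): on this IP the pipe clock appears to be supplied by the PCIe PHY, so it can only be enabled after phy_power_on() and has to be gated again around phy_power_off(), while the ordinary bus clocks stay in init()/deinit(). A sketch of the bring-up half of that ordering, with illustrative names:

#include <linux/clk.h>
#include <linux/phy/phy.h>

/*
 * Illustrative bring-up fragment: the pipe clock is assumed to be driven by
 * the PHY, so it is only turned on once the PHY itself is powered.
 */
static int example_bringup(struct phy *phy, struct clk *pipe_clk)
{
	int ret;

	ret = phy_power_on(phy);		/* init() + PHY first */
	if (ret)
		return ret;

	ret = clk_prepare_enable(pipe_clk);	/* post_init() step */
	if (ret)
		phy_power_off(phy);

	return ret;
}

Teardown reverses this: post_deinit() gates the pipe clock, then the PHY is powered off, then deinit() stops the remaining clocks, which is exactly the order of the error path shown later in this patch.
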
@@ -589,9 +632,9 @@ err_cfg_clk: | |||
589 | return ret; | 632 | return ret; |
590 | } | 633 | } |
591 | 634 | ||
592 | static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) | 635 | static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) |
593 | { | 636 | { |
594 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | 637 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; |
595 | struct dw_pcie *pci = pcie->pci; | 638 | struct dw_pcie *pci = pcie->pci; |
596 | struct device *dev = pci->dev; | 639 | struct device *dev = pci->dev; |
597 | int ret; | 640 | int ret; |
@@ -605,9 +648,9 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) | |||
605 | return 0; | 648 | return 0; |
606 | } | 649 | } |
607 | 650 | ||
608 | static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) | 651 | static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) |
609 | { | 652 | { |
610 | struct qcom_pcie_resources_v3 *res = &pcie->res.v3; | 653 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; |
611 | struct dw_pcie *pci = pcie->pci; | 654 | struct dw_pcie *pci = pcie->pci; |
612 | struct device *dev = pci->dev; | 655 | struct device *dev = pci->dev; |
613 | 656 | ||
@@ -623,60 +666,64 @@ static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) | |||
623 | if (IS_ERR(res->slave_clk)) | 666 | if (IS_ERR(res->slave_clk)) |
624 | return PTR_ERR(res->slave_clk); | 667 | return PTR_ERR(res->slave_clk); |
625 | 668 | ||
626 | res->axi_m_reset = devm_reset_control_get(dev, "axi_m"); | 669 | res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); |
627 | if (IS_ERR(res->axi_m_reset)) | 670 | if (IS_ERR(res->axi_m_reset)) |
628 | return PTR_ERR(res->axi_m_reset); | 671 | return PTR_ERR(res->axi_m_reset); |
629 | 672 | ||
630 | res->axi_s_reset = devm_reset_control_get(dev, "axi_s"); | 673 | res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); |
631 | if (IS_ERR(res->axi_s_reset)) | 674 | if (IS_ERR(res->axi_s_reset)) |
632 | return PTR_ERR(res->axi_s_reset); | 675 | return PTR_ERR(res->axi_s_reset); |
633 | 676 | ||
634 | res->pipe_reset = devm_reset_control_get(dev, "pipe"); | 677 | res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); |
635 | if (IS_ERR(res->pipe_reset)) | 678 | if (IS_ERR(res->pipe_reset)) |
636 | return PTR_ERR(res->pipe_reset); | 679 | return PTR_ERR(res->pipe_reset); |
637 | 680 | ||
638 | res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid"); | 681 | res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, |
682 | "axi_m_vmid"); | ||
639 | if (IS_ERR(res->axi_m_vmid_reset)) | 683 | if (IS_ERR(res->axi_m_vmid_reset)) |
640 | return PTR_ERR(res->axi_m_vmid_reset); | 684 | return PTR_ERR(res->axi_m_vmid_reset); |
641 | 685 | ||
642 | res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu"); | 686 | res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, |
687 | "axi_s_xpu"); | ||
643 | if (IS_ERR(res->axi_s_xpu_reset)) | 688 | if (IS_ERR(res->axi_s_xpu_reset)) |
644 | return PTR_ERR(res->axi_s_xpu_reset); | 689 | return PTR_ERR(res->axi_s_xpu_reset); |
645 | 690 | ||
646 | res->parf_reset = devm_reset_control_get(dev, "parf"); | 691 | res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); |
647 | if (IS_ERR(res->parf_reset)) | 692 | if (IS_ERR(res->parf_reset)) |
648 | return PTR_ERR(res->parf_reset); | 693 | return PTR_ERR(res->parf_reset); |
649 | 694 | ||
650 | res->phy_reset = devm_reset_control_get(dev, "phy"); | 695 | res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); |
651 | if (IS_ERR(res->phy_reset)) | 696 | if (IS_ERR(res->phy_reset)) |
652 | return PTR_ERR(res->phy_reset); | 697 | return PTR_ERR(res->phy_reset); |
653 | 698 | ||
654 | res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky"); | 699 | res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, |
700 | "axi_m_sticky"); | ||
655 | if (IS_ERR(res->axi_m_sticky_reset)) | 701 | if (IS_ERR(res->axi_m_sticky_reset)) |
656 | return PTR_ERR(res->axi_m_sticky_reset); | 702 | return PTR_ERR(res->axi_m_sticky_reset); |
657 | 703 | ||
658 | res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky"); | 704 | res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, |
705 | "pipe_sticky"); | ||
659 | if (IS_ERR(res->pipe_sticky_reset)) | 706 | if (IS_ERR(res->pipe_sticky_reset)) |
660 | return PTR_ERR(res->pipe_sticky_reset); | 707 | return PTR_ERR(res->pipe_sticky_reset); |
661 | 708 | ||
662 | res->pwr_reset = devm_reset_control_get(dev, "pwr"); | 709 | res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); |
663 | if (IS_ERR(res->pwr_reset)) | 710 | if (IS_ERR(res->pwr_reset)) |
664 | return PTR_ERR(res->pwr_reset); | 711 | return PTR_ERR(res->pwr_reset); |
665 | 712 | ||
666 | res->ahb_reset = devm_reset_control_get(dev, "ahb"); | 713 | res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); |
667 | if (IS_ERR(res->ahb_reset)) | 714 | if (IS_ERR(res->ahb_reset)) |
668 | return PTR_ERR(res->ahb_reset); | 715 | return PTR_ERR(res->ahb_reset); |
669 | 716 | ||
670 | res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb"); | 717 | res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); |
671 | if (IS_ERR(res->phy_ahb_reset)) | 718 | if (IS_ERR(res->phy_ahb_reset)) |
672 | return PTR_ERR(res->phy_ahb_reset); | 719 | return PTR_ERR(res->phy_ahb_reset); |
673 | 720 | ||
674 | return 0; | 721 | return 0; |
675 | } | 722 | } |
676 | 723 | ||
677 | static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) | 724 | static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) |
678 | { | 725 | { |
679 | struct qcom_pcie_resources_v3 *res = &pcie->res.v3; | 726 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; |
680 | 727 | ||
681 | reset_control_assert(res->axi_m_reset); | 728 | reset_control_assert(res->axi_m_reset); |
682 | reset_control_assert(res->axi_s_reset); | 729 | reset_control_assert(res->axi_s_reset); |
@@ -692,9 +739,9 @@ static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) | |||
692 | clk_disable_unprepare(res->slave_clk); | 739 | clk_disable_unprepare(res->slave_clk); |
693 | } | 740 | } |
694 | 741 | ||
695 | static int qcom_pcie_init_v3(struct qcom_pcie *pcie) | 742 | static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) |
696 | { | 743 | { |
697 | struct qcom_pcie_resources_v3 *res = &pcie->res.v3; | 744 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; |
698 | struct dw_pcie *pci = pcie->pci; | 745 | struct dw_pcie *pci = pcie->pci; |
699 | struct device *dev = pci->dev; | 746 | struct device *dev = pci->dev; |
700 | u32 val; | 747 | u32 val; |
@@ -884,6 +931,166 @@ err_rst_phy: | |||
884 | return ret; | 931 | return ret; |
885 | } | 932 | } |
886 | 933 | ||
934 | static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) | ||
935 | { | ||
936 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
937 | struct dw_pcie *pci = pcie->pci; | ||
938 | struct device *dev = pci->dev; | ||
939 | int i; | ||
940 | const char *rst_names[] = { "axi_m", "axi_s", "pipe", | ||
941 | "axi_m_sticky", "sticky", | ||
942 | "ahb", "sleep", }; | ||
943 | |||
944 | res->iface = devm_clk_get(dev, "iface"); | ||
945 | if (IS_ERR(res->iface)) | ||
946 | return PTR_ERR(res->iface); | ||
947 | |||
948 | res->axi_m_clk = devm_clk_get(dev, "axi_m"); | ||
949 | if (IS_ERR(res->axi_m_clk)) | ||
950 | return PTR_ERR(res->axi_m_clk); | ||
951 | |||
952 | res->axi_s_clk = devm_clk_get(dev, "axi_s"); | ||
953 | if (IS_ERR(res->axi_s_clk)) | ||
954 | return PTR_ERR(res->axi_s_clk); | ||
955 | |||
956 | res->ahb_clk = devm_clk_get(dev, "ahb"); | ||
957 | if (IS_ERR(res->ahb_clk)) | ||
958 | return PTR_ERR(res->ahb_clk); | ||
959 | |||
960 | res->aux_clk = devm_clk_get(dev, "aux"); | ||
961 | if (IS_ERR(res->aux_clk)) | ||
962 | return PTR_ERR(res->aux_clk); | ||
963 | |||
964 | for (i = 0; i < ARRAY_SIZE(rst_names); i++) { | ||
965 | res->rst[i] = devm_reset_control_get(dev, rst_names[i]); | ||
966 | if (IS_ERR(res->rst[i])) | ||
967 | return PTR_ERR(res->rst[i]); | ||
968 | } | ||
969 | |||
970 | return 0; | ||
971 | } | ||
972 | |||
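
qcom_pcie_get_resources_2_3_3() pulls its seven resets out of a static name table and a loop rather than one getter call per line, which keeps the function short as the list grows; note that this loop still uses the unsuffixed devm_reset_control_get(), whereas the rest of the series converts to the _exclusive variant. A reduced sketch of the same pattern, with an illustrative three-entry table and the exclusive getter:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reset.h>

static const char * const example_rst_names[] = { "axi_m", "axi_s", "ahb" };

/* Illustrative bulk lookup: one devm-managed reset per table entry. */
static int example_get_resets(struct device *dev, struct reset_control **rst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_rst_names); i++) {
		rst[i] = devm_reset_control_get_exclusive(dev, example_rst_names[i]);
		if (IS_ERR(rst[i]))
			return PTR_ERR(rst[i]);
	}

	return 0;
}
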
973 | static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) | ||
974 | { | ||
975 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
976 | |||
977 | clk_disable_unprepare(res->iface); | ||
978 | clk_disable_unprepare(res->axi_m_clk); | ||
979 | clk_disable_unprepare(res->axi_s_clk); | ||
980 | clk_disable_unprepare(res->ahb_clk); | ||
981 | clk_disable_unprepare(res->aux_clk); | ||
982 | } | ||
983 | |||
984 | static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) | ||
985 | { | ||
986 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
987 | struct dw_pcie *pci = pcie->pci; | ||
988 | struct device *dev = pci->dev; | ||
989 | int i, ret; | ||
990 | u32 val; | ||
991 | |||
992 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) { | ||
993 | ret = reset_control_assert(res->rst[i]); | ||
994 | if (ret) { | ||
995 | dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); | ||
996 | return ret; | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | usleep_range(2000, 2500); | ||
1001 | |||
1002 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) { | ||
1003 | ret = reset_control_deassert(res->rst[i]); | ||
1004 | if (ret) { | ||
1005 | dev_err(dev, "reset #%d deassert failed (%d)\n", i, | ||
1006 | ret); | ||
1007 | return ret; | ||
1008 | } | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * There is no way to check whether the resets have completed, | ||
1013 | * so wait for some time. | ||
1014 | */ | ||
1015 | usleep_range(2000, 2500); | ||
1016 | |||
1017 | ret = clk_prepare_enable(res->iface); | ||
1018 | if (ret) { | ||
1019 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
1020 | goto err_clk_iface; | ||
1021 | } | ||
1022 | |||
1023 | ret = clk_prepare_enable(res->axi_m_clk); | ||
1024 | if (ret) { | ||
1025 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
1026 | goto err_clk_axi_m; | ||
1027 | } | ||
1028 | |||
1029 | ret = clk_prepare_enable(res->axi_s_clk); | ||
1030 | if (ret) { | ||
1031 | dev_err(dev, "cannot prepare/enable axi slave clock\n"); | ||
1032 | goto err_clk_axi_s; | ||
1033 | } | ||
1034 | |||
1035 | ret = clk_prepare_enable(res->ahb_clk); | ||
1036 | if (ret) { | ||
1037 | dev_err(dev, "cannot prepare/enable ahb clock\n"); | ||
1038 | goto err_clk_ahb; | ||
1039 | } | ||
1040 | |||
1041 | ret = clk_prepare_enable(res->aux_clk); | ||
1042 | if (ret) { | ||
1043 | dev_err(dev, "cannot prepare/enable aux clock\n"); | ||
1044 | goto err_clk_aux; | ||
1045 | } | ||
1046 | |||
1047 | writel(SLV_ADDR_SPACE_SZ, | ||
1048 | pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); | ||
1049 | |||
1050 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
1051 | val &= ~BIT(0); | ||
1052 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
1053 | |||
1054 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
1055 | |||
1056 | writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | ||
1057 | | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | | ||
1058 | AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, | ||
1059 | pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
1060 | writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); | ||
1061 | |||
1062 | writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); | ||
1063 | writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); | ||
1064 | writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); | ||
1065 | |||
1066 | val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); | ||
1067 | val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; | ||
1068 | writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); | ||
1069 | |||
1070 | writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + | ||
1071 | PCIE20_DEVICE_CONTROL2_STATUS2); | ||
1072 | |||
1073 | return 0; | ||
1074 | |||
1075 | err_clk_aux: | ||
1076 | clk_disable_unprepare(res->ahb_clk); | ||
1077 | err_clk_ahb: | ||
1078 | clk_disable_unprepare(res->axi_s_clk); | ||
1079 | err_clk_axi_s: | ||
1080 | clk_disable_unprepare(res->axi_m_clk); | ||
1081 | err_clk_axi_m: | ||
1082 | clk_disable_unprepare(res->iface); | ||
1083 | err_clk_iface: | ||
1084 | /* | ||
1085 | * Failures here are ignored; the original error | ||
1086 | * is returned in 'ret' anyway. | ||
1087 | */ | ||
1088 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) | ||
1089 | reset_control_assert(res->rst[i]); | ||
1090 | |||
1091 | return ret; | ||
1092 | } | ||
1093 | |||
887 | static int qcom_pcie_link_up(struct dw_pcie *pci) | 1094 | static int qcom_pcie_link_up(struct dw_pcie *pci) |
888 | { | 1095 | { |
889 | u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); | 1096 | u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); |
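
The error handling in qcom_pcie_init_2_3_3() above is the standard kernel goto ladder: a failure at step N jumps to a label that unwinds only the steps that already succeeded, in reverse order, and the final label re-asserts the resets before returning the original error. Reduced to two clocks, the shape is (names illustrative):

#include <linux/clk.h>

/* Illustrative two-clock enable showing the unwind-in-reverse idiom. */
static int example_enable_clocks(struct clk *iface, struct clk *axi_m)
{
	int ret;

	ret = clk_prepare_enable(iface);
	if (ret)
		return ret;		/* nothing enabled yet, nothing to undo */

	ret = clk_prepare_enable(axi_m);
	if (ret)
		goto err_iface;		/* undo only what already succeeded */

	return 0;

err_iface:
	clk_disable_unprepare(iface);
	return ret;
}
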
@@ -891,7 +1098,7 @@ static int qcom_pcie_link_up(struct dw_pcie *pci) | |||
891 | return !!(val & PCI_EXP_LNKSTA_DLLLA); | 1098 | return !!(val & PCI_EXP_LNKSTA_DLLLA); |
892 | } | 1099 | } |
893 | 1100 | ||
894 | static void qcom_pcie_host_init(struct pcie_port *pp) | 1101 | static int qcom_pcie_host_init(struct pcie_port *pp) |
895 | { | 1102 | { |
896 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 1103 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
897 | struct qcom_pcie *pcie = to_qcom_pcie(pci); | 1104 | struct qcom_pcie *pcie = to_qcom_pcie(pci); |
@@ -901,14 +1108,17 @@ static void qcom_pcie_host_init(struct pcie_port *pp) | |||
901 | 1108 | ||
902 | ret = pcie->ops->init(pcie); | 1109 | ret = pcie->ops->init(pcie); |
903 | if (ret) | 1110 | if (ret) |
904 | goto err_deinit; | 1111 | return ret; |
905 | 1112 | ||
906 | ret = phy_power_on(pcie->phy); | 1113 | ret = phy_power_on(pcie->phy); |
907 | if (ret) | 1114 | if (ret) |
908 | goto err_deinit; | 1115 | goto err_deinit; |
909 | 1116 | ||
910 | if (pcie->ops->post_init) | 1117 | if (pcie->ops->post_init) { |
911 | pcie->ops->post_init(pcie); | 1118 | ret = pcie->ops->post_init(pcie); |
1119 | if (ret) | ||
1120 | goto err_disable_phy; | ||
1121 | } | ||
912 | 1122 | ||
913 | dw_pcie_setup_rc(pp); | 1123 | dw_pcie_setup_rc(pp); |
914 | 1124 | ||
@@ -921,12 +1131,17 @@ static void qcom_pcie_host_init(struct pcie_port *pp) | |||
921 | if (ret) | 1131 | if (ret) |
922 | goto err; | 1132 | goto err; |
923 | 1133 | ||
924 | return; | 1134 | return 0; |
925 | err: | 1135 | err: |
926 | qcom_ep_reset_assert(pcie); | 1136 | qcom_ep_reset_assert(pcie); |
1137 | if (pcie->ops->post_deinit) | ||
1138 | pcie->ops->post_deinit(pcie); | ||
1139 | err_disable_phy: | ||
927 | phy_power_off(pcie->phy); | 1140 | phy_power_off(pcie->phy); |
928 | err_deinit: | 1141 | err_deinit: |
929 | pcie->ops->deinit(pcie); | 1142 | pcie->ops->deinit(pcie); |
1143 | |||
1144 | return ret; | ||
930 | } | 1145 | } |
931 | 1146 | ||
932 | static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | 1147 | static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, |
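
With host_init now returning an int, qcom_pcie_host_init() can report failures from init(), phy_power_on() and post_init() to the DesignWare core instead of leaving the port half-initialised, and its error labels release only what was acquired before the failing step. A skeletal host_init under the new prototype, using only the DWC helpers visible in this file (everything else here is illustrative, not the qcom code):

#include "pcie-designware.h"	/* local DWC header used by this driver */

/* Illustrative minimal host_init that propagates errors to the core. */
static int example_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup_rc(pp);

	/* SoC-specific link-enable glue would go here; then report the result. */
	return dw_pcie_wait_for_link(pci);
}

static const struct dw_pcie_host_ops example_pcie_host_ops = {
	.host_init = example_pcie_host_init,
};
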
@@ -950,37 +1165,50 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { | |||
950 | .rd_own_conf = qcom_pcie_rd_own_conf, | 1165 | .rd_own_conf = qcom_pcie_rd_own_conf, |
951 | }; | 1166 | }; |
952 | 1167 | ||
953 | static const struct qcom_pcie_ops ops_v0 = { | 1168 | /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ |
954 | .get_resources = qcom_pcie_get_resources_v0, | 1169 | static const struct qcom_pcie_ops ops_2_1_0 = { |
955 | .init = qcom_pcie_init_v0, | 1170 | .get_resources = qcom_pcie_get_resources_2_1_0, |
956 | .deinit = qcom_pcie_deinit_v0, | 1171 | .init = qcom_pcie_init_2_1_0, |
957 | .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable, | 1172 | .deinit = qcom_pcie_deinit_2_1_0, |
1173 | .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, | ||
958 | }; | 1174 | }; |
959 | 1175 | ||
960 | static const struct qcom_pcie_ops ops_v1 = { | 1176 | /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ |
961 | .get_resources = qcom_pcie_get_resources_v1, | 1177 | static const struct qcom_pcie_ops ops_1_0_0 = { |
962 | .init = qcom_pcie_init_v1, | 1178 | .get_resources = qcom_pcie_get_resources_1_0_0, |
963 | .deinit = qcom_pcie_deinit_v1, | 1179 | .init = qcom_pcie_init_1_0_0, |
964 | .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable, | 1180 | .deinit = qcom_pcie_deinit_1_0_0, |
1181 | .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, | ||
965 | }; | 1182 | }; |
966 | 1183 | ||
967 | static const struct qcom_pcie_ops ops_v2 = { | 1184 | /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ |
968 | .get_resources = qcom_pcie_get_resources_v2, | 1185 | static const struct qcom_pcie_ops ops_2_3_2 = { |
969 | .init = qcom_pcie_init_v2, | 1186 | .get_resources = qcom_pcie_get_resources_2_3_2, |
970 | .post_init = qcom_pcie_post_init_v2, | 1187 | .init = qcom_pcie_init_2_3_2, |
971 | .deinit = qcom_pcie_deinit_v2, | 1188 | .post_init = qcom_pcie_post_init_2_3_2, |
972 | .ltssm_enable = qcom_pcie_v2_ltssm_enable, | 1189 | .deinit = qcom_pcie_deinit_2_3_2, |
1190 | .post_deinit = qcom_pcie_post_deinit_2_3_2, | ||
1191 | .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, | ||
973 | }; | 1192 | }; |
974 | 1193 | ||
975 | static const struct dw_pcie_ops dw_pcie_ops = { | 1194 | /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ |
976 | .link_up = qcom_pcie_link_up, | 1195 | static const struct qcom_pcie_ops ops_2_4_0 = { |
1196 | .get_resources = qcom_pcie_get_resources_2_4_0, | ||
1197 | .init = qcom_pcie_init_2_4_0, | ||
1198 | .deinit = qcom_pcie_deinit_2_4_0, | ||
1199 | .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, | ||
977 | }; | 1200 | }; |
978 | 1201 | ||
979 | static const struct qcom_pcie_ops ops_v3 = { | 1202 | /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ |
980 | .get_resources = qcom_pcie_get_resources_v3, | 1203 | static const struct qcom_pcie_ops ops_2_3_3 = { |
981 | .init = qcom_pcie_init_v3, | 1204 | .get_resources = qcom_pcie_get_resources_2_3_3, |
982 | .deinit = qcom_pcie_deinit_v3, | 1205 | .init = qcom_pcie_init_2_3_3, |
983 | .ltssm_enable = qcom_pcie_v2_ltssm_enable, | 1206 | .deinit = qcom_pcie_deinit_2_3_3, |
1207 | .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, | ||
1208 | }; | ||
1209 | |||
1210 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
1211 | .link_up = qcom_pcie_link_up, | ||
984 | }; | 1212 | }; |
985 | 1213 | ||
986 | static int qcom_pcie_probe(struct platform_device *pdev) | 1214 | static int qcom_pcie_probe(struct platform_device *pdev) |
@@ -1069,11 +1297,12 @@ static int qcom_pcie_probe(struct platform_device *pdev) | |||
1069 | } | 1297 | } |
1070 | 1298 | ||
1071 | static const struct of_device_id qcom_pcie_match[] = { | 1299 | static const struct of_device_id qcom_pcie_match[] = { |
1072 | { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 }, | 1300 | { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, |
1073 | { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, | 1301 | { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, |
1074 | { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, | 1302 | { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, |
1075 | { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, | 1303 | { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, |
1076 | { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 }, | 1304 | { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, |
1305 | { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, | ||
1077 | { } | 1306 | { } |
1078 | }; | 1307 | }; |
1079 | 1308 | ||
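
The match table now keys each compatible string to the qcom_pcie_ops table for its Qcom IP revision via .data, so supporting a new SoC is a one-line entry plus an ops definition. A sketch of how such per-compatible data is typically fetched in probe, assuming the generic of_device_get_match_data() helper (the real probe body is outside this hunk):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative probe fragment: select the per-SoC ops from the OF match data. */
static int example_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_ops *ops;	/* type defined earlier in this file */

	ops = of_device_get_match_data(&pdev->dev);
	if (!ops)
		return -EINVAL;

	/* ... stash 'ops' in the driver state and continue with probe ... */
	return 0;
}
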
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 80897291e0fb..709189d23b31 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c | |||
@@ -177,13 +177,15 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci) | |||
177 | return 0; | 177 | return 0; |
178 | } | 178 | } |
179 | 179 | ||
180 | static void spear13xx_pcie_host_init(struct pcie_port *pp) | 180 | static int spear13xx_pcie_host_init(struct pcie_port *pp) |
181 | { | 181 | { |
182 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 182 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
183 | struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); | 183 | struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); |
184 | 184 | ||
185 | spear13xx_pcie_establish_link(spear13xx_pcie); | 185 | spear13xx_pcie_establish_link(spear13xx_pcie); |
186 | spear13xx_pcie_enable_interrupts(spear13xx_pcie); | 186 | spear13xx_pcie_enable_interrupts(spear13xx_pcie); |
187 | |||
188 | return 0; | ||
187 | } | 189 | } |
188 | 190 | ||
189 | static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { | 191 | static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { |
@@ -199,9 +201,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, | |||
199 | int ret; | 201 | int ret; |
200 | 202 | ||
201 | pp->irq = platform_get_irq(pdev, 0); | 203 | pp->irq = platform_get_irq(pdev, 0); |
202 | if (!pp->irq) { | 204 | if (pp->irq < 0) { |
203 | dev_err(dev, "failed to get irq\n"); | 205 | dev_err(dev, "failed to get irq\n"); |
204 | return -ENODEV; | 206 | return pp->irq; |
205 | } | 207 | } |
206 | ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, | 208 | ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, |
207 | IRQF_SHARED | IRQF_NO_THREAD, | 209 | IRQF_SHARED | IRQF_NO_THREAD, |
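
The SPEAr13xx hunk is an error-handling fix: platform_get_irq() reports failure as a negative errno, so testing !pp->irq both missed real errors and threw away the error code; checking for a negative value and returning it also lets errors such as -EPROBE_DEFER propagate. A minimal sketch of the corrected pattern (the helper is illustrative):

#include <linux/platform_device.h>

/* Illustrative IRQ lookup: pass platform_get_irq()'s negative errno upstream. */
static int example_get_irq(struct platform_device *pdev, int *irq_out)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* e.g. -EPROBE_DEFER or -ENXIO */

	*irq_out = irq;
	return 0;
}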