Diffstat (limited to 'drivers/pci/dwc')
-rw-r--r--  drivers/pci/dwc/Kconfig                 |  36
-rw-r--r--  drivers/pci/dwc/Makefile                |   5
-rw-r--r--  drivers/pci/dwc/pci-dra7xx.c            | 293
-rw-r--r--  drivers/pci/dwc/pci-exynos.c            |  14
-rw-r--r--  drivers/pci/dwc/pci-imx6.c              | 199
-rw-r--r--  drivers/pci/dwc/pci-keystone-dw.c       |   2
-rw-r--r--  drivers/pci/dwc/pci-layerscape.c        |   3
-rw-r--r--  drivers/pci/dwc/pcie-armada8k.c         |   3
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c          |  12
-rw-r--r--  drivers/pci/dwc/pcie-designware-ep.c    | 342
-rw-r--r--  drivers/pci/dwc/pcie-designware-host.c  |  39
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c  |   1
-rw-r--r--  drivers/pci/dwc/pcie-designware.c       | 258
-rw-r--r--  drivers/pci/dwc/pcie-designware.h       | 135
-rw-r--r--  drivers/pci/dwc/pcie-hisi.c             |   9
-rw-r--r--  drivers/pci/dwc/pcie-qcom.c             |   2
-rw-r--r--  drivers/pci/dwc/pcie-spear13xx.c        |   3
17 files changed, 1177 insertions, 179 deletions
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index d2d2ba5b8a68..b7e15526d676 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -9,16 +9,44 @@ config PCIE_DW_HOST
 	depends on PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW
 
+config PCIE_DW_EP
+	bool
+	depends on PCI_ENDPOINT
+	select PCIE_DW
+
 config PCI_DRA7XX
 	bool "TI DRA7xx PCIe controller"
-	depends on PCI
+	depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
 	depends on OF && HAS_IOMEM && TI_PIPE3
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC. There
+	  are two instances of PCIe controller in DRA7xx. This controller can
+	  work either as EP or RC. In order to enable host-specific features
+	  PCI_DRA7XX_HOST must be selected and in order to enable device-
+	  specific features PCI_DRA7XX_EP must be selected. This uses
+	  the Designware core.
+
+if PCI_DRA7XX
+
+config PCI_DRA7XX_HOST
+	bool "PCI DRA7xx Host Mode"
+	depends on PCI
 	depends on PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW_HOST
+	default y
 	help
-	  Enables support for the PCIe controller in the DRA7xx SoC. There
-	  are two instances of PCIe controller in DRA7xx. This controller can
-	  act both as EP and RC. This reuses the Designware core.
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  host mode.
+
+config PCI_DRA7XX_EP
+	bool "PCI DRA7xx Endpoint Mode"
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  endpoint mode.
+
+endif
 
 config PCIE_DW_PLAT
 	bool "Platform bus based DesignWare PCIe Controller"
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index a2df13c28798..f31a8596442a 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -1,7 +1,10 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
 obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
 obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
-obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
+	obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+endif
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 0984baff07e3..8decf46cf525 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -10,12 +10,14 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
@@ -24,6 +26,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/resource.h>
 #include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include "pcie-designware.h"
 
@@ -57,6 +61,11 @@
 #define MSI					BIT(4)
 #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
 
+#define PCIECTRL_TI_CONF_DEVICE_TYPE		0x0100
+#define DEVICE_TYPE_EP				0x0
+#define DEVICE_TYPE_LEG_EP			0x1
+#define DEVICE_TYPE_RC				0x4
+
 #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD		0x0104
 #define LTSSM_EN				0x1
 
@@ -66,6 +75,13 @@
 
 #define EXP_CAP_ID_OFFSET			0x70
 
+#define PCIECTRL_TI_CONF_INTX_ASSERT		0x0124
+#define PCIECTRL_TI_CONF_INTX_DEASSERT		0x0128
+
+#define PCIECTRL_TI_CONF_MSI_XMT		0x012c
+#define MSI_REQ_GRANT				BIT(0)
+#define MSI_VECTOR_SHIFT			7
+
 struct dra7xx_pcie {
 	struct dw_pcie *pci;
 	void __iomem *base;		/* DT ti_conf */
@@ -73,6 +89,11 @@ struct dra7xx_pcie {
 	struct phy **phy;
 	int link_gen;
 	struct irq_domain *irq_domain;
+	enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+	enum dw_pcie_device_mode mode;
 };
 
 #define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
@@ -88,6 +109,11 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
 	writel(value, pcie->base + offset);
 }
 
+static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
 static int dra7xx_pcie_link_up(struct dw_pcie *pci)
 {
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -96,9 +122,19 @@ static int dra7xx_pcie_link_up(struct dw_pcie *pci)
 	return !!(reg & LINK_UP);
 }
 
-static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
 {
-	struct dw_pcie *pci = dra7xx->pci;
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	u32 reg;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg &= ~LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 	struct device *dev = pci->dev;
 	u32 reg;
 	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
@@ -132,34 +168,42 @@ static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
 	reg |= LTSSM_EN;
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 
-	return dw_pcie_wait_for_link(pci);
+	return 0;
 }
 
-static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
 {
-	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
-			   ~INTERRUPTS);
-	dra7xx_pcie_writel(dra7xx,
-			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
 			   ~LEG_EP_INTERRUPTS & ~MSI);
-	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+
+	dra7xx_pcie_writel(dra7xx,
+			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
 			   MSI | LEG_EP_INTERRUPTS);
 }
 
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+			   ~INTERRUPTS);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+	dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
 static void dra7xx_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 
-	pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
-	pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
-	pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
-	pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
-
 	dw_pcie_setup_rc(pp);
 
-	dra7xx_pcie_establish_link(dra7xx);
+	dra7xx_pcie_establish_link(pci);
+	dw_pcie_wait_for_link(pci);
 	dw_pcie_msi_init(pp);
 	dra7xx_pcie_enable_interrupts(dra7xx);
 }
@@ -237,6 +281,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	struct dra7xx_pcie *dra7xx = arg;
 	struct dw_pcie *pci = dra7xx->pci;
 	struct device *dev = pci->dev;
+	struct dw_pcie_ep *ep = &pci->ep;
 	u32 reg;
 
 	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
@@ -273,8 +318,11 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	if (reg & LINK_REQ_RST)
 		dev_dbg(dev, "Link Request Reset\n");
 
-	if (reg & LINK_UP_EVT)
+	if (reg & LINK_UP_EVT) {
+		if (dra7xx->mode == DW_PCIE_EP_TYPE)
+			dw_pcie_ep_linkup(ep);
 		dev_dbg(dev, "Link-up state change\n");
+	}
 
 	if (reg & CFG_BME_EVT)
 		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
@@ -287,6 +335,94 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+	mdelay(1);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+				      u8 interrupt_num)
+{
+	u32 reg;
+
+	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+	reg |= MSI_REQ_GRANT;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
+				 enum pci_epc_irq_type type, u8 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dra7xx_pcie_raise_legacy_irq(dra7xx);
+		break;
+	case PCI_EPC_IRQ_MSI:
+		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+		break;
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dra7xx_pcie_ep_init,
+	.raise_irq = dra7xx_pcie_raise_irq,
+};
+
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+				     struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dra7xx->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pci->dbi_base)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pci->dbi_base2)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 				       struct platform_device *pdev)
 {
@@ -329,6 +465,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+	.start_link = dra7xx_pcie_establish_link,
+	.stop_link = dra7xx_pcie_stop_link,
 	.link_up = dra7xx_pcie_link_up,
 };
 
@@ -371,6 +510,68 @@ err_phy:
 	return ret;
 }
 
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+	{
+		.compatible = "ti,dra7-pcie",
+		.data = &dra7xx_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra7-pcie-ep",
+		.data = &dra7xx_pcie_ep_of_data,
+	},
+	{},
+};
+
+/*
+ * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Access to the PCIe slave port that are not 32-bit aligned will result
+ * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
+ * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
+ * 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+{
+	int ret;
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args args;
+	struct regmap *regmap;
+
+	regmap = syscon_regmap_lookup_by_phandle(np,
+						 "ti,syscon-unaligned-access");
+	if (IS_ERR(regmap)) {
+		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+		return -EINVAL;
+	}
+
+	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+					       2, 0, &args);
+	if (ret) {
+		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+				 args.args[1]);
+	if (ret)
+		dev_err(dev, "failed to enable unaligned access\n");
+
+	of_node_put(args.np);
+
+	return ret;
+}
+
 static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 {
 	u32 reg;
@@ -388,6 +589,16 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	char name[10];
 	struct gpio_desc *reset;
+	const struct of_device_id *match;
+	const struct dra7xx_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dra7xx_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
 
 	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
 	if (!dra7xx)
@@ -409,13 +620,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
-			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
-	if (ret) {
-		dev_err(dev, "failed to request irq\n");
-		return ret;
-	}
-
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
 	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
 	if (!base)
@@ -473,9 +677,37 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
 		dra7xx->link_gen = 2;
 
-	ret = dra7xx_add_pcie_port(dra7xx, pdev);
-	if (ret < 0)
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_RC);
+		ret = dra7xx_add_pcie_port(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	case DW_PCIE_EP_TYPE:
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_EP);
+
+		ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+		if (ret)
+			goto err_gpio;
+
+		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+	dra7xx->mode = mode;
+
+	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
 		goto err_gpio;
+	}
 
 	return 0;
 
@@ -496,6 +728,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
 	struct dw_pcie *pci = dra7xx->pci;
 	u32 val;
 
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
 	/* clear MSE */
 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 	val &= ~PCI_COMMAND_MEMORY;
@@ -510,6 +745,9 @@ static int dra7xx_pcie_resume(struct device *dev)
 	struct dw_pcie *pci = dra7xx->pci;
 	u32 val;
 
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
 	/* set MSE */
 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 	val |= PCI_COMMAND_MEMORY;
@@ -548,11 +786,6 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
 				      dra7xx_pcie_resume_noirq)
 };
 
-static const struct of_device_id of_dra7xx_pcie_match[] = {
-	{ .compatible = "ti,dra7-pcie", },
-	{},
-};
-
 static struct platform_driver dra7xx_pcie_driver = {
 	.driver = {
 		.name = "dra7-pcie",
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 44f774c12fb2..546082ad5a3f 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -521,23 +521,25 @@ static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
 	exynos_pcie_msi_init(ep);
 }
 
-static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+				u32 reg, size_t size)
 {
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
 	u32 val;
 
 	exynos_pcie_sideband_dbi_r_mode(ep, true);
-	val = readl(pci->dbi_base + reg);
+	dw_pcie_read(base + reg, size, &val);
 	exynos_pcie_sideband_dbi_r_mode(ep, false);
 	return val;
 }
 
-static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				  u32 reg, size_t size, u32 val)
 {
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
 
 	exynos_pcie_sideband_dbi_w_mode(ep, true);
-	writel(val, pci->dbi_base + reg);
+	dw_pcie_write(base + reg, size, val);
 	exynos_pcie_sideband_dbi_w_mode(ep, false);
 }
 
@@ -644,8 +646,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
-	.readl_dbi = exynos_pcie_readl_dbi,
-	.writel_dbi = exynos_pcie_writel_dbi,
+	.read_dbi = exynos_pcie_read_dbi,
+	.write_dbi = exynos_pcie_write_dbi,
 	.link_up = exynos_pcie_link_up,
 };
 
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 801e46cd266d..a98cba55c7f0 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
@@ -27,6 +28,7 @@
 #include <linux/signal.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
+#include <linux/reset.h>
 
 #include "pcie-designware.h"
 
@@ -36,6 +38,7 @@ enum imx6_pcie_variants {
 	IMX6Q,
 	IMX6SX,
 	IMX6QP,
+	IMX7D,
 };
 
 struct imx6_pcie {
@@ -47,6 +50,8 @@ struct imx6_pcie {
 	struct clk *pcie_inbound_axi;
 	struct clk *pcie;
 	struct regmap *iomuxc_gpr;
+	struct reset_control *pciephy_reset;
+	struct reset_control *apps_reset;
 	enum imx6_pcie_variants variant;
 	u32 tx_deemph_gen1;
 	u32 tx_deemph_gen2_3p5db;
@@ -56,6 +61,11 @@ struct imx6_pcie {
 	int link_gen;
 };
 
+/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
+#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
+
 /* PCIe Root Complex registers (memory-mapped) */
 #define PCIE_RC_LCR				0x7c
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
@@ -248,6 +258,10 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 {
 	switch (imx6_pcie->variant) {
+	case IMX7D:
+		reset_control_assert(imx6_pcie->pciephy_reset);
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
 	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -303,11 +317,32 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
 		break;
+	case IMX7D:
+		break;
 	}
 
 	return ret;
 }
 
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+	u32 val;
+	unsigned int retries;
+	struct device *dev = imx6_pcie->pci->dev;
+
+	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
+		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
+
+		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
+			return;
+
+		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
+			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
+	}
+
+	dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
 static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
@@ -351,6 +386,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 	}
 
 	switch (imx6_pcie->variant) {
+	case IMX7D:
+		reset_control_deassert(imx6_pcie->pciephy_reset);
+		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+		break;
 	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
 				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
@@ -377,35 +416,44 @@ err_pcie_bus:
 
 static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 {
-	if (imx6_pcie->variant == IMX6SX)
+	switch (imx6_pcie->variant) {
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+		break;
+	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
 				   IMX6SX_GPR12_PCIE_RX_EQ_2);
+		/* FALLTHROUGH */
+	default:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
 
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-			   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+		/* configure constant input signal to the pcie ctrl and phy */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
+				   imx6_pcie->tx_deemph_gen1 << 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+				   imx6_pcie->tx_deemph_gen2_6db << 12);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_FULL,
+				   imx6_pcie->tx_swing_full << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_LOW,
+				   imx6_pcie->tx_swing_low << 25);
+		break;
+	}
 
-	/* configure constant input signal to the pcie ctrl and phy */
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
-			   imx6_pcie->tx_deemph_gen1 << 0);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
-			   imx6_pcie->tx_deemph_gen2_3p5db << 6);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
-			   imx6_pcie->tx_deemph_gen2_6db << 12);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_SWING_FULL,
-			   imx6_pcie->tx_swing_full << 18);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_SWING_LOW,
-			   imx6_pcie->tx_swing_low << 25);
 }
 
 static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
@@ -469,8 +517,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
 	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
 
 	/* Start LTSSM. */
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-			   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+	if (imx6_pcie->variant == IMX7D)
+		reset_control_deassert(imx6_pcie->apps_reset);
+	else
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
 
 	ret = imx6_pcie_wait_for_link(imx6_pcie);
 	if (ret)
@@ -482,29 +533,40 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
 		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
 		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
 		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
-	} else {
-		dev_info(dev, "Link: Gen2 disabled\n");
-	}
-
-	/*
-	 * Start Directed Speed Change so the best possible speed both link
-	 * partners support can be negotiated.
-	 */
-	tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
-	tmp |= PORT_LOGIC_SPEED_CHANGE;
-	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
 
-	ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
-	if (ret) {
-		dev_err(dev, "Failed to bring link up!\n");
-		goto err_reset_phy;
-	}
+		/*
+		 * Start Directed Speed Change so the best possible
+		 * speed both link partners support can be negotiated.
+		 */
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		tmp |= PORT_LOGIC_SPEED_CHANGE;
+		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+		if (imx6_pcie->variant != IMX7D) {
+			/*
+			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+			 * from i.MX6 family when no link speed transition
+			 * occurs and we go Gen1 -> yep, Gen1. The difference
+			 * is that, in such case, it will not be cleared by HW
+			 * which will cause the following code to report false
+			 * failure.
+			 */
+
+			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+			if (ret) {
+				dev_err(dev, "Failed to bring link up!\n");
+				goto err_reset_phy;
+			}
+		}
 
 		/* Make sure link training is finished as well! */
 		ret = imx6_pcie_wait_for_link(imx6_pcie);
 		if (ret) {
 			dev_err(dev, "Failed to bring link up!\n");
 			goto err_reset_phy;
+		}
+	} else {
+		dev_info(dev, "Link: Gen2 disabled\n");
 	}
 
 	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
@@ -544,8 +606,8 @@ static struct dw_pcie_host_ops imx6_pcie_host_ops = {
 	.host_init = imx6_pcie_host_init,
 };
 
-static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
-				     struct platform_device *pdev)
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+			      struct platform_device *pdev)
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -585,7 +647,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = imx6_pcie_link_up,
 };
 
-static int __init imx6_pcie_probe(struct platform_device *pdev)
+static int imx6_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct dw_pcie *pci;
@@ -609,10 +671,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 	imx6_pcie->variant =
 		(enum imx6_pcie_variants)of_device_get_match_data(dev);
 
-	/* Added for PCI abort handling */
-	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
-			"imprecise external abort");
-
 	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
 	if (IS_ERR(pci->dbi_base))
@@ -632,6 +690,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 			dev_err(dev, "unable to get reset gpio\n");
 			return ret;
 		}
+	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+		return imx6_pcie->reset_gpio;
 	}
 
 	/* Fetch clocks */
@@ -653,13 +713,31 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(imx6_pcie->pcie);
 	}
 
-	if (imx6_pcie->variant == IMX6SX) {
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
 		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
 							   "pcie_inbound_axi");
 		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
 			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
 			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
 		}
+		break;
+	case IMX7D:
+		imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
+								  "pciephy");
+		if (IS_ERR(imx6_pcie->pciephy_reset)) {
+			dev_err(dev, "Failed to get PCIEPHY reset control\n");
+			return PTR_ERR(imx6_pcie->pciephy_reset);
+		}
+
+		imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+		if (IS_ERR(imx6_pcie->apps_reset)) {
+			dev_err(dev, "Failed to get PCIE APPS reset control\n");
+			return PTR_ERR(imx6_pcie->apps_reset);
+		}
+		break;
+	default:
+		break;
 	}
 
 	/* Grab GPR config register range */
@@ -718,6 +796,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
 	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
 	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
 	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
+	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
 	{},
 };
 
@@ -725,12 +804,24 @@ static struct platform_driver imx6_pcie_driver = {
 	.driver = {
 		.name = "imx6q-pcie",
 		.of_match_table = imx6_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
+	.probe = imx6_pcie_probe,
 	.shutdown = imx6_pcie_shutdown,
 };
 
 static int __init imx6_pcie_init(void)
 {
-	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+	/*
+	 * Since probe() can be deferred we need to make sure that
+	 * hook_fault_code is not called after __init memory is freed
+	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
+	 * we can install the handler here without risking it
+	 * accessing some uninitialized driver state.
+	 */
+	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+			"imprecise external abort");
+
+	return platform_driver_register(&imx6_pcie_driver);
 }
 device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 6b396f6b4615..8bc626e640c8 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -543,7 +543,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
 
 	/* Index 0 is the config reg. space address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pci->dbi_base = devm_ioremap_resource(dev, res);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index c32e392a0ae6..27d638c4e134 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -283,7 +283,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
 	pcie->pci = pci;
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 
@@ -305,6 +305,7 @@ static struct platform_driver ls_pcie_driver = {
 	.driver = {
 		.name = "layerscape-pcie",
 		.of_match_table = ls_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index f110e3b24a26..495b023042b3 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -230,7 +230,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
 
 	/* Get the dw-pcie unit configuration/control registers base. */
 	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
-	pci->dbi_base = devm_ioremap_resource(dev, base);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
 	if (IS_ERR(pci->dbi_base)) {
 		dev_err(dev, "couldn't remap regs base %p\n", base);
 		ret = PTR_ERR(pci->dbi_base);
@@ -262,6 +262,7 @@ static struct platform_driver armada8k_pcie_driver = {
 	.driver = {
 		.name = "armada8k-pcie",
 		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+		.suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 6d23683c0892..82a04acc42fd 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -78,6 +78,11 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
 	regmap_write(artpec6_pcie->regmap, offset, val);
 }
 
+static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+	return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
+}
+
 static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
 {
 	struct dw_pcie *pci = artpec6_pcie->pci;
@@ -142,11 +147,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
 	 */
 	dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
 
-	pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-	pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-	pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-	pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-
 	/* setup root complex */
 	dw_pcie_setup_rc(pp);
 
@@ -235,6 +235,7 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
 };
 
 static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -294,6 +295,7 @@ static struct platform_driver artpec6_pcie_driver = {
 	.driver = {
 		.name = "artpec6-pcie",
 		.of_match_table = artpec6_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
new file mode 100644
index 000000000000..398406393f37
--- /dev/null
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -0,0 +1,342 @@
+/**
+ * Synopsys Designware PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_linkup(epc);
+}
+
+static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+	u32 reg;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+	dw_pcie_writel_dbi2(pci, reg, 0x0);
+	dw_pcie_writel_dbi(pci, reg, 0x0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc,
+				   struct pci_epf_header *hdr)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+			   hdr->subclass_code | hdr->baseclass_code << 8);
+	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+			   hdr->cache_line_size);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+			   hdr->subsys_vendor_id);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+			   hdr->interrupt_pin);
+
+	return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+				  dma_addr_t cpu_addr,
+				  enum dw_pcie_as_type as_type)
+{
+	int ret;
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(&ep->ib_window_map,
+				       sizeof(ep->ib_window_map));
+	if (free_win >= ep->num_ib_windows) {
+		dev_err(pci->dev, "no free inbound window\n");
+		return -EINVAL;
+	}
+
+	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+				       as_type);
+	if (ret < 0) {
+		dev_err(pci->dev, "Failed to program IB window\n");
+		return ret;
+	}
+
+	ep->bar_to_atu[bar] = free_win;
+	set_bit(free_win, &ep->ib_window_map);
+
+	return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+				   u64 pci_addr, size_t size)
+{
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(&ep->ob_window_map,
+				       sizeof(ep->ob_window_map));
+	if (free_win >= ep->num_ob_windows) {
+		dev_err(pci->dev, "no free outbound window\n");
+		return -EINVAL;
+	}
+
+	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+				  phys_addr, pci_addr, size);
+
+	set_bit(free_win, &ep->ob_window_map);
+	ep->outbound_addr[free_win] = phys_addr;
+
+	return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 atu_index = ep->bar_to_atu[bar];
+
+	dw_pcie_ep_reset_bar(pci, bar);
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+	clear_bit(atu_index, &ep->ib_window_map);
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+			      dma_addr_t bar_phys, size_t size, int flags)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum dw_pcie_as_type as_type;
+	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+	if (!(flags & PCI_BASE_ADDRESS_SPACE))
+		as_type = DW_PCIE_AS_MEM;
+	else
+		as_type = DW_PCIE_AS_IO;
+
+	ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
+	if (ret)
+		return ret;
+
+	dw_pcie_writel_dbi2(pci, reg, size - 1);
+	dw_pcie_writel_dbi(pci, reg, flags);
+
+	return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+			      u32 *atu_index)
+{
+	u32 index;
+
+	for (index = 0; index < ep->num_ob_windows; index++) {
+		if (ep->outbound_addr[index] != addr)
+			continue;
+		*atu_index = index;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
+{
+	int ret;
+	u32 atu_index;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_find_index(ep, addr, &atu_index);
+	if (ret < 0)
+		return;
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+	clear_bit(atu_index, &ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+			       u64 pci_addr, size_t size)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+	if (ret) {
+		dev_err(pci->dev, "failed to enable address\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc)
+{
+	int val;
+	u32 lower_addr;
+	u32 upper_addr;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
+	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+
+	lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+	upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
+
+	if (!(lower_addr || upper_addr))
+		return -EINVAL;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
+{
+	int val;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	val = (encode_int << MSI_CAP_MMC_SHIFT);
+	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+
+	return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+				enum pci_epc_irq_type type, u8 interrupt_num)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+	if (!ep->ops->raise_irq)
+		return -EINVAL;
+
+	return ep->ops->raise_irq(ep, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->stop_link)
+		return;
+
+	pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->start_link)
+		return -EINVAL;
+
+	return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_ops epc_ops = {
+	.write_header = dw_pcie_ep_write_header,
+	.set_bar = dw_pcie_ep_set_bar,
+	.clear_bar = dw_pcie_ep_clear_bar,
+	.map_addr = dw_pcie_ep_map_addr,
+	.unmap_addr = dw_pcie_ep_unmap_addr,
+	.set_msi = dw_pcie_ep_set_msi,
+	.get_msi = dw_pcie_ep_get_msi,
+	.raise_irq = dw_pcie_ep_raise_irq,
+	.start = dw_pcie_ep_start,
+	.stop = dw_pcie_ep_stop,
+};
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_mem_exit(epc);
+}
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	int ret;
+	void *addr;
+	enum pci_barno bar;
+	struct pci_epc *epc;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	if (!pci->dbi_base || !pci->dbi_base2) {
+		dev_err(dev, "dbi_base/deb_base2 is not populated\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+	if (ret < 0) {
+		dev_err(dev, "unable to read *num-ib-windows* property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+	if (ret < 0) {
+		dev_err(dev, "unable to read *num-ob-windows* property\n");
+		return ret;
+	}
+
+	addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
+			    GFP_KERNEL);
+	if (!addr)
+		return -ENOMEM;
+	ep->outbound_addr = addr;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+
+	if (ep->ops->ep_init)
+		ep->ops->ep_init(ep);
+
+	epc = devm_pci_epc_create(dev, &epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "failed to create epc device\n");
+		return PTR_ERR(epc);
+	}
+
+	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+	if (ret < 0)
+		epc->max_functions = 1;
+
+	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size);
+	if (ret < 0) {
+		dev_err(dev, "Failed to initialize address space\n");
+		return ret;
+	}
+
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+	dw_pcie_setup(pci);
+
+	return 0;
+}
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 5ba334938b52..28ed32ba4f1b 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -56,24 +56,25 @@ static struct irq_chip dw_msi_irq_chip = {
 /* MSI int handler */
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 {
-	unsigned long val;
+	u32 val;
 	int i, pos, irq;
 	irqreturn_t ret = IRQ_NONE;
 
 	for (i = 0; i < MAX_MSI_CTRLS; i++) {
 		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
-				    (u32 *)&val);
-		if (val) {
-			ret = IRQ_HANDLED;
-			pos = 0;
-			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
-				irq = irq_find_mapping(pp->irq_domain,
-						       i * 32 + pos);
-				dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-						    i * 12, 4, 1 << pos);
-				generic_handle_irq(irq);
-				pos++;
-			}
+				    &val);
+		if (!val)
+			continue;
+
+		ret = IRQ_HANDLED;
+		pos = 0;
+		while ((pos = find_next_bit((unsigned long *) &val, 32,
+					    pos)) != 32) {
+			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+					    4, 1 << pos);
+			generic_handle_irq(irq);
+			pos++;
 		}
 	}
 
@@ -338,8 +339,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (!pci->dbi_base) {
-		pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
-					     resource_size(pp->cfg));
+		pci->dbi_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg->start,
+						resource_size(pp->cfg));
 		if (!pci->dbi_base) {
 			dev_err(dev, "error with ioremap\n");
 			ret = -ENOMEM;
@@ -350,8 +352,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	pp->mem_base = pp->mem->start;
 
 	if (!pp->va_cfg0_base) {
-		pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
-						pp->cfg0_size);
+		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+					pp->cfg0_base, pp->cfg0_size);
 		if (!pp->va_cfg0_base) {
 			dev_err(dev, "error with ioremap in function\n");
 			ret = -ENOMEM;
@@ -360,7 +362,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (!pp->va_cfg1_base) {
-		pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
+		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+					pp->cfg1_base,
 					pp->cfg1_size);
 		if (!pp->va_cfg1_base) {
 			dev_err(dev, "error with ioremap\n");
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index f20d494922ab..32091b32f6e1 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -133,6 +133,7 @@ static struct platform_driver dw_plat_pcie_driver = {
 	.driver = {
 		.name = "dw-pcie",
 		.of_match_table = dw_plat_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
 	.probe = dw_plat_pcie_probe,
 };
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 7e1fb7d6643c..0e03af279259 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -61,91 +61,253 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
61 return PCIBIOS_SUCCESSFUL; 61 return PCIBIOS_SUCCESSFUL;
62} 62}
63 63
64u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) 64u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
65 size_t size)
65{ 66{
66 if (pci->ops->readl_dbi) 67 int ret;
67 return pci->ops->readl_dbi(pci, reg); 68 u32 val;
68 69
69 return readl(pci->dbi_base + reg); 70 if (pci->ops->read_dbi)
71 return pci->ops->read_dbi(pci, base, reg, size);
72
73 ret = dw_pcie_read(base + reg, size, &val);
74 if (ret)
75 dev_err(pci->dev, "read DBI address failed\n");
76
77 return val;
70} 78}
71 79
72void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) 80void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
81 size_t size, u32 val)
73{ 82{
74 if (pci->ops->writel_dbi) 83 int ret;
75 pci->ops->writel_dbi(pci, reg, val); 84
76 else 85 if (pci->ops->write_dbi) {
77 writel(val, pci->dbi_base + reg); 86 pci->ops->write_dbi(pci, base, reg, size, val);
87 return;
88 }
89
90 ret = dw_pcie_write(base + reg, size, val);
91 if (ret)
92 dev_err(pci->dev, "write DBI address failed\n");
78} 93}
79 94
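The accessors above are now parameterised by an explicit register base and access size, so a glue driver that needs to intercept DBI traffic implements the new read_dbi/write_dbi hooks instead of the old readl_dbi/writel_dbi pair. A hypothetical hook pair, sketched only from the signatures in this patch; a real driver would add whatever translation or locking its DBI window requires:

static u32 demo_read_dbi(struct dw_pcie *pci, void __iomem *base,
                         u32 reg, size_t size)
{
        u32 val;

        dw_pcie_read(base + reg, size, &val);
        return val;
}

static void demo_write_dbi(struct dw_pcie *pci, void __iomem *base,
                           u32 reg, size_t size, u32 val)
{
        dw_pcie_write(base + reg, size, val);
}

static const struct dw_pcie_ops demo_pcie_ops = {
        .read_dbi       = demo_read_dbi,
        .write_dbi      = demo_write_dbi,
};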
80static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg) 95static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
81{ 96{
82 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); 97 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
83 98
84 return dw_pcie_readl_dbi(pci, offset + reg); 99 return dw_pcie_readl_dbi(pci, offset + reg);
85} 100}
86 101
87static void dw_pcie_writel_unroll(struct dw_pcie *pci, u32 index, u32 reg, 102static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
88 u32 val) 103 u32 val)
89{ 104{
90 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); 105 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
91 106
92 dw_pcie_writel_dbi(pci, offset + reg, val); 107 dw_pcie_writel_dbi(pci, offset + reg, val);
93} 108}
94 109
110void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
111 u64 cpu_addr, u64 pci_addr, u32 size)
112{
113 u32 retries, val;
114
115 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
116 lower_32_bits(cpu_addr));
117 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
118 upper_32_bits(cpu_addr));
119 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
120 lower_32_bits(cpu_addr + size - 1));
121 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
122 lower_32_bits(pci_addr));
123 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
124 upper_32_bits(pci_addr));
125 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
126 type);
127 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
128 PCIE_ATU_ENABLE);
129
130 /*
131 * Make sure ATU enable takes effect before any subsequent config
132 * and I/O accesses.
133 */
134 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
135 val = dw_pcie_readl_ob_unroll(pci, index,
136 PCIE_ATU_UNR_REGION_CTRL2);
137 if (val & PCIE_ATU_ENABLE)
138 return;
139
140 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
141 }
142 dev_err(pci->dev, "outbound iATU is not being enabled\n");
143}
144
95void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, 145void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
96 u64 cpu_addr, u64 pci_addr, u32 size) 146 u64 cpu_addr, u64 pci_addr, u32 size)
97{ 147{
98 u32 retries, val; 148 u32 retries, val;
99 149
150 if (pci->ops->cpu_addr_fixup)
151 cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
152
100 if (pci->iatu_unroll_enabled) { 153 if (pci->iatu_unroll_enabled) {
101 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, 154 dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
102 lower_32_bits(cpu_addr)); 155 pci_addr, size);
103 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, 156 return;
104 upper_32_bits(cpu_addr));
105 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
106 lower_32_bits(cpu_addr + size - 1));
107 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
108 lower_32_bits(pci_addr));
109 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
110 upper_32_bits(pci_addr));
111 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
112 type);
113 dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
114 PCIE_ATU_ENABLE);
115 } else {
116 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
117 PCIE_ATU_REGION_OUTBOUND | index);
118 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
119 lower_32_bits(cpu_addr));
120 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
121 upper_32_bits(cpu_addr));
122 dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
123 lower_32_bits(cpu_addr + size - 1));
124 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
125 lower_32_bits(pci_addr));
126 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
127 upper_32_bits(pci_addr));
128 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
129 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
130 } 157 }
131 158
159 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
160 PCIE_ATU_REGION_OUTBOUND | index);
161 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
162 lower_32_bits(cpu_addr));
163 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
164 upper_32_bits(cpu_addr));
165 dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
166 lower_32_bits(cpu_addr + size - 1));
167 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
168 lower_32_bits(pci_addr));
169 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
170 upper_32_bits(pci_addr));
171 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
172 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
173
132 /* 174 /*
133 * Make sure ATU enable takes effect before any subsequent config 175 * Make sure ATU enable takes effect before any subsequent config
134 * and I/O accesses. 176 * and I/O accesses.
135 */ 177 */
136 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { 178 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
137 if (pci->iatu_unroll_enabled) 179 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
138 val = dw_pcie_readl_unroll(pci, index,
139 PCIE_ATU_UNR_REGION_CTRL2);
140 else
141 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
142
143 if (val == PCIE_ATU_ENABLE) 180 if (val == PCIE_ATU_ENABLE)
144 return; 181 return;
145 182
146 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 183 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
147 } 184 }
148 dev_err(pci->dev, "iATU is not being enabled\n"); 185 dev_err(pci->dev, "outbound iATU is not being enabled\n");
186}
187
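With the unroll path factored out, dw_pcie_prog_outbound_atu() is the single entry point both the host core and the new endpoint core use to open a CPU-to-PCI window. An illustrative call with made-up addresses, using only symbols visible in this patch:

/* Illustrative only: route CPU accesses in [cpu_base, cpu_base + 64K)
 * to PCI address pci_base as memory transactions, using region 0. */
static void demo_open_mem_window(struct dw_pcie *pci, u64 cpu_base, u64 pci_base)
{
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
                                  cpu_base, pci_base, 0x10000);
}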
188static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
189{
190 u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
191
192 return dw_pcie_readl_dbi(pci, offset + reg);
193}
194
195static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
196 u32 val)
197{
198 u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
199
200 dw_pcie_writel_dbi(pci, offset + reg, val);
201}
202
203int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
204 u64 cpu_addr, enum dw_pcie_as_type as_type)
205{
206 int type;
207 u32 retries, val;
208
209 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
210 lower_32_bits(cpu_addr));
211 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
212 upper_32_bits(cpu_addr));
213
214 switch (as_type) {
215 case DW_PCIE_AS_MEM:
216 type = PCIE_ATU_TYPE_MEM;
217 break;
218 case DW_PCIE_AS_IO:
219 type = PCIE_ATU_TYPE_IO;
220 break;
221 default:
222 return -EINVAL;
223 }
224
225 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
226 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
227 PCIE_ATU_ENABLE |
228 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
229
230 /*
231 * Make sure ATU enable takes effect before any subsequent config
232 * and I/O accesses.
233 */
234 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
235 val = dw_pcie_readl_ib_unroll(pci, index,
236 PCIE_ATU_UNR_REGION_CTRL2);
237 if (val & PCIE_ATU_ENABLE)
238 return 0;
239
240 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
241 }
242 dev_err(pci->dev, "inbound iATU is not being enabled\n");
243
244 return -EBUSY;
245}
246
247int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
248 u64 cpu_addr, enum dw_pcie_as_type as_type)
249{
250 int type;
251 u32 retries, val;
252
253 if (pci->iatu_unroll_enabled)
254 return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
255 cpu_addr, as_type);
256
257 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
258 index);
259 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
260 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
261
262 switch (as_type) {
263 case DW_PCIE_AS_MEM:
264 type = PCIE_ATU_TYPE_MEM;
265 break;
266 case DW_PCIE_AS_IO:
267 type = PCIE_ATU_TYPE_IO;
268 break;
269 default:
270 return -EINVAL;
271 }
272
273 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
274 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
275 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
276
277 /*
278 * Make sure ATU enable takes effect before any subsequent config
279 * and I/O accesses.
280 */
281 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
282 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
283 if (val & PCIE_ATU_ENABLE)
284 return 0;
285
286 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
287 }
288 dev_err(pci->dev, "inbound iATU is not being enabled\n");
289
290 return -EBUSY;
291}
292
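dw_pcie_prog_inbound_atu() is new with this series: in endpoint mode it ties a BAR to a local memory or I/O address, selected by the dw_pcie_as_type argument. An illustrative call; the BAR number and buffer address are made up:

/* Illustrative only: back BAR0 with a local buffer so host accesses
 * to BAR0 land at mem_phys. */
static int demo_back_bar0(struct dw_pcie *pci, phys_addr_t mem_phys)
{
        return dw_pcie_prog_inbound_atu(pci, 0, 0 /* BAR0 */,
                                        mem_phys, DW_PCIE_AS_MEM);
}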
293void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
294 enum dw_pcie_region_type type)
295{
296 int region;
297
298 switch (type) {
299 case DW_PCIE_REGION_INBOUND:
300 region = PCIE_ATU_REGION_INBOUND;
301 break;
302 case DW_PCIE_REGION_OUTBOUND:
303 region = PCIE_ATU_REGION_OUTBOUND;
304 break;
305 default:
306 return;
307 }
308
309 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
310 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
149} 311}
150 312
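dw_pcie_disable_atu() is the matching teardown: it selects the region through the viewport and clears the enable bit, which the endpoint core needs when a mapping or BAR is released. Continuing the illustrative sketches above:

static void demo_teardown(struct dw_pcie *pci)
{
        dw_pcie_disable_atu(pci, 0, DW_PCIE_REGION_OUTBOUND);
        dw_pcie_disable_atu(pci, 0, DW_PCIE_REGION_INBOUND);
}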
151int dw_pcie_wait_for_link(struct dw_pcie *pci) 313int dw_pcie_wait_for_link(struct dw_pcie *pci)
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index cd3b8713fe50..c6a840575796 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -18,6 +18,9 @@
18#include <linux/msi.h> 18#include <linux/msi.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20 20
21#include <linux/pci-epc.h>
22#include <linux/pci-epf.h>
23
21/* Parameters for the waiting for link up routine */ 24/* Parameters for the waiting for link up routine */
22#define LINK_WAIT_MAX_RETRIES 10 25#define LINK_WAIT_MAX_RETRIES 10
23#define LINK_WAIT_USLEEP_MIN 90000 26#define LINK_WAIT_USLEEP_MIN 90000
@@ -89,6 +92,16 @@
89#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ 92#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
90 ((0x3 << 20) | ((region) << 9)) 93 ((0x3 << 20) | ((region) << 9))
91 94
95#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
96 ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
97
98#define MSI_MESSAGE_CONTROL 0x52
99#define MSI_CAP_MMC_SHIFT 1
100#define MSI_CAP_MME_SHIFT 4
101#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
102#define MSI_MESSAGE_ADDR_L32 0x54
103#define MSI_MESSAGE_ADDR_U32 0x58
104
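The MSI_MESSAGE_* offsets describe the endpoint's own MSI capability in its DBI space; together with the sized DBI helpers added further down in this header, the EP core can read or program those fields directly. An illustrative read of the Multiple Message Enable field (not the EP core's actual code):

static u8 demo_msi_mme(struct dw_pcie *pci)
{
        u16 ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);

        return (ctrl & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
}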
92/* 105/*
93 * Maximum number of MSI IRQs can be 256 per controller. But keep 106 * Maximum number of MSI IRQs can be 256 per controller. But keep
94 * it 32 as of now. Probably we will never need more than 32. If needed, 107 * it 32 as of now. Probably we will never need more than 32. If needed,
@@ -99,6 +112,20 @@
99 112
100struct pcie_port; 113struct pcie_port;
101struct dw_pcie; 114struct dw_pcie;
115struct dw_pcie_ep;
116
117enum dw_pcie_region_type {
118 DW_PCIE_REGION_UNKNOWN,
119 DW_PCIE_REGION_INBOUND,
120 DW_PCIE_REGION_OUTBOUND,
121};
122
123enum dw_pcie_device_mode {
124 DW_PCIE_UNKNOWN_TYPE,
125 DW_PCIE_EP_TYPE,
126 DW_PCIE_LEG_EP_TYPE,
127 DW_PCIE_RC_TYPE,
128};
102 129
103struct dw_pcie_host_ops { 130struct dw_pcie_host_ops {
104 int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); 131 int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
@@ -142,35 +169,116 @@ struct pcie_port {
142 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); 169 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
143}; 170};
144 171
172enum dw_pcie_as_type {
173 DW_PCIE_AS_UNKNOWN,
174 DW_PCIE_AS_MEM,
175 DW_PCIE_AS_IO,
176};
177
178struct dw_pcie_ep_ops {
179 void (*ep_init)(struct dw_pcie_ep *ep);
180 int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
181 u8 interrupt_num);
182};
183
184struct dw_pcie_ep {
185 struct pci_epc *epc;
186 struct dw_pcie_ep_ops *ops;
187 phys_addr_t phys_base;
188 size_t addr_size;
189 u8 bar_to_atu[6];
190 phys_addr_t *outbound_addr;
191 unsigned long ib_window_map;
192 unsigned long ob_window_map;
193 u32 num_ib_windows;
194 u32 num_ob_windows;
195};
196
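struct dw_pcie_ep and its ops are the endpoint-mode state now embedded in struct dw_pcie. A hypothetical glue driver wires them up roughly as below, sketched only from the declarations in this header; resource setup for phys_base/addr_size is omitted:

static void demo_ep_init(struct dw_pcie_ep *ep)
{
        /* per-SoC endpoint setup, e.g. default BAR configuration */
}

static int demo_raise_irq(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
                          u8 interrupt_num)
{
        /* platform-specific interrupt doorbell would be written here */
        return -EINVAL;
}

static struct dw_pcie_ep_ops demo_ep_ops = {
        .ep_init        = demo_ep_init,
        .raise_irq      = demo_raise_irq,
};

static int demo_add_pcie_ep(struct dw_pcie *pci)
{
        struct dw_pcie_ep *ep = &pci->ep;

        ep->ops = &demo_ep_ops;
        return dw_pcie_ep_init(ep);
}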
145struct dw_pcie_ops { 197struct dw_pcie_ops {
146 u32 (*readl_dbi)(struct dw_pcie *pcie, u32 reg); 198 u64 (*cpu_addr_fixup)(u64 cpu_addr);
147 void (*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val); 199 u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
200 size_t size);
201 void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
202 size_t size, u32 val);
148 int (*link_up)(struct dw_pcie *pcie); 203 int (*link_up)(struct dw_pcie *pcie);
204 int (*start_link)(struct dw_pcie *pcie);
205 void (*stop_link)(struct dw_pcie *pcie);
149}; 206};
150 207
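cpu_addr_fixup is the other new hook: dw_pcie_prog_outbound_atu() calls it (see above) so glue drivers whose CPU addresses carry a SoC-specific offset can strip it before the address reaches the iATU. A hypothetical hook; the mask is made up:

#define DEMO_CPU_ADDR_MASK      0x0fffffffULL   /* illustrative, SoC specific */

static u64 demo_cpu_addr_fixup(u64 cpu_addr)
{
        return cpu_addr & DEMO_CPU_ADDR_MASK;
}

static const struct dw_pcie_ops demo_ops_with_fixup = {
        .cpu_addr_fixup = demo_cpu_addr_fixup,
};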
151struct dw_pcie { 208struct dw_pcie {
152 struct device *dev; 209 struct device *dev;
153 void __iomem *dbi_base; 210 void __iomem *dbi_base;
211 void __iomem *dbi_base2;
154 u32 num_viewport; 212 u32 num_viewport;
155 u8 iatu_unroll_enabled; 213 u8 iatu_unroll_enabled;
156 struct pcie_port pp; 214 struct pcie_port pp;
215 struct dw_pcie_ep ep;
157 const struct dw_pcie_ops *ops; 216 const struct dw_pcie_ops *ops;
158}; 217};
159 218
160#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) 219#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
161 220
221#define to_dw_pcie_from_ep(endpoint) \
222 container_of((endpoint), struct dw_pcie, ep)
223
162int dw_pcie_read(void __iomem *addr, int size, u32 *val); 224int dw_pcie_read(void __iomem *addr, int size, u32 *val);
163int dw_pcie_write(void __iomem *addr, int size, u32 val); 225int dw_pcie_write(void __iomem *addr, int size, u32 val);
164 226
165u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg); 227u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
166void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val); 228 size_t size);
229void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
230 size_t size, u32 val);
167int dw_pcie_link_up(struct dw_pcie *pci); 231int dw_pcie_link_up(struct dw_pcie *pci);
168int dw_pcie_wait_for_link(struct dw_pcie *pci); 232int dw_pcie_wait_for_link(struct dw_pcie *pci);
169void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, 233void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
170 int type, u64 cpu_addr, u64 pci_addr, 234 int type, u64 cpu_addr, u64 pci_addr,
171 u32 size); 235 u32 size);
236int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
237 u64 cpu_addr, enum dw_pcie_as_type as_type);
238void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
239 enum dw_pcie_region_type type);
172void dw_pcie_setup(struct dw_pcie *pci); 240void dw_pcie_setup(struct dw_pcie *pci);
173 241
242static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
243{
244 __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
245}
246
247static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
248{
249 return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
250}
251
252static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
253{
254 __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
255}
256
257static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
258{
259 return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
260}
261
262static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
263{
264 __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
265}
266
267static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
268{
269 return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
270}
271
272static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
273{
274 __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
275}
276
277static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
278{
279 return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
280}
281
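dw_pcie_readl_dbi2()/dw_pcie_writel_dbi2() go through the new dbi_base2 pointer, a second DBI address space that some DesignWare integrations expose and that endpoint glue commonly uses for write-protected registers such as BAR masks. The register offset below is a made-up placeholder, not something this patch defines; the point is only that dbi2 accesses reuse the same sized-accessor plumbing:

#define DEMO_BAR0_MASK_REG      0x10    /* illustrative offset only */

static void demo_set_bar0_mask(struct dw_pcie *pci, u32 mask)
{
        dw_pcie_writel_dbi2(pci, DEMO_BAR0_MASK_REG, mask);
}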
174#ifdef CONFIG_PCIE_DW_HOST 282#ifdef CONFIG_PCIE_DW_HOST
175irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); 283irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
176void dw_pcie_msi_init(struct pcie_port *pp); 284void dw_pcie_msi_init(struct pcie_port *pp);
@@ -195,4 +303,23 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
195 return 0; 303 return 0;
196} 304}
197#endif 305#endif
306
307#ifdef CONFIG_PCIE_DW_EP
308void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
309int dw_pcie_ep_init(struct dw_pcie_ep *ep);
310void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
311#else
312static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
313{
314}
315
316static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
317{
318 return 0;
319}
320
321static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
322{
323}
324#endif
198#endif /* _PCIE_DESIGNWARE_H */ 325#endif /* _PCIE_DESIGNWARE_H */
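With host and endpoint entry points both stubbed out when their Kconfig options are disabled, a dual-mode glue driver can be built for either or both modes and pick one at probe time based on dw_pcie_device_mode. A hedged sketch of that dispatch; in a real driver the mode would come from of_device_id data and error handling would be fuller:

static int demo_probe_mode(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
{
        switch (mode) {
        case DW_PCIE_RC_TYPE:
                return dw_pcie_host_init(&pci->pp);
        case DW_PCIE_EP_TYPE:
                return dw_pcie_ep_init(&pci->ep);
        default:
                return -EINVAL;
        }
}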
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index cf9d6a9d9fd4..e51acee0ddf3 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -99,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
99 return -ENOMEM; 99 return -ENOMEM;
100 } 100 }
101 101
102 reg_base = devm_ioremap(dev, res->start, resource_size(res)); 102 reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
103 if (!reg_base) 103 if (!reg_base)
104 return -ENOMEM; 104 return -ENOMEM;
105 105
@@ -296,10 +296,9 @@ static int hisi_pcie_probe(struct platform_device *pdev)
296 } 296 }
297 297
298 reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); 298 reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
299 pci->dbi_base = devm_ioremap_resource(dev, reg); 299 pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
300 if (IS_ERR(pci->dbi_base)) 300 if (IS_ERR(pci->dbi_base))
301 return PTR_ERR(pci->dbi_base); 301 return PTR_ERR(pci->dbi_base);
302
303 platform_set_drvdata(pdev, hisi_pcie); 302 platform_set_drvdata(pdev, hisi_pcie);
304 303
305 ret = hisi_add_pcie_port(hisi_pcie, pdev); 304 ret = hisi_add_pcie_port(hisi_pcie, pdev);
@@ -334,6 +333,7 @@ static struct platform_driver hisi_pcie_driver = {
334 .driver = { 333 .driver = {
335 .name = "hisi-pcie", 334 .name = "hisi-pcie",
336 .of_match_table = hisi_pcie_of_match, 335 .of_match_table = hisi_pcie_of_match,
336 .suppress_bind_attrs = true,
337 }, 337 },
338}; 338};
339builtin_platform_driver(hisi_pcie_driver); 339builtin_platform_driver(hisi_pcie_driver);
@@ -360,7 +360,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
360 return -EINVAL; 360 return -EINVAL;
361 } 361 }
362 362
363 reg_base = devm_ioremap(dev, res->start, resource_size(res)); 363 reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
364 if (!reg_base) 364 if (!reg_base)
365 return -ENOMEM; 365 return -ENOMEM;
366 366
@@ -395,6 +395,7 @@ static struct platform_driver hisi_pcie_almost_ecam_driver = {
395 .driver = { 395 .driver = {
396 .name = "hisi-pcie-almost-ecam", 396 .name = "hisi-pcie-almost-ecam",
397 .of_match_table = hisi_pcie_almost_ecam_of_match, 397 .of_match_table = hisi_pcie_almost_ecam_of_match,
398 .suppress_bind_attrs = true,
398 }, 399 },
399}; 400};
400builtin_platform_driver(hisi_pcie_almost_ecam_driver); 401builtin_platform_driver(hisi_pcie_almost_ecam_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 67eb7f5926dd..5bf23d432fdb 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -700,7 +700,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
700 return PTR_ERR(pcie->parf); 700 return PTR_ERR(pcie->parf);
701 701
702 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 702 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
703 pci->dbi_base = devm_ioremap_resource(dev, res); 703 pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
704 if (IS_ERR(pci->dbi_base)) 704 if (IS_ERR(pci->dbi_base))
705 return PTR_ERR(pci->dbi_base); 705 return PTR_ERR(pci->dbi_base);
706 706
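The DBI conversions in the HiSilicon and Qualcomm probes above, and in the SPEAr probe below, use devm_pci_remap_cfg_resource(), which combines the request and non-posted remap steps and returns an ERR_PTR on failure, so the callers' IS_ERR() checks stay as they were. A minimal probe sketch; the "dbi" resource name matches these drivers, everything else is illustrative:

#include <linux/pci.h>
#include <linux/platform_device.h>

static void __iomem *demo_map_dbi(struct platform_device *pdev)
{
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
        /* requests the region and maps it; returns ERR_PTR on failure */
        return devm_pci_remap_cfg_resource(&pdev->dev, res);
}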
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index eaa4ea8e2ea4..8ff36b3dbbdf 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -273,7 +273,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
273 } 273 }
274 274
275 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 275 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
276 pci->dbi_base = devm_ioremap_resource(dev, dbi_base); 276 pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
277 if (IS_ERR(pci->dbi_base)) { 277 if (IS_ERR(pci->dbi_base)) {
278 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); 278 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
279 ret = PTR_ERR(pci->dbi_base); 279 ret = PTR_ERR(pci->dbi_base);
@@ -308,6 +308,7 @@ static struct platform_driver spear13xx_pcie_driver = {
308 .driver = { 308 .driver = {
309 .name = "spear-pcie", 309 .name = "spear-pcie",
310 .of_match_table = of_match_ptr(spear13xx_pcie_of_match), 310 .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
311 .suppress_bind_attrs = true,
311 }, 312 },
312}; 313};
313 314