aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/host
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/pci/host')
-rw-r--r--drivers/pci/host/Kconfig6
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-exynos.c112
-rw-r--r--drivers/pci/host/pci-imx6.c575
-rw-r--r--drivers/pci/host/pci-tegra.c4
-rw-r--r--drivers/pci/host/pcie-designware.c240
-rw-r--r--drivers/pci/host/pcie-designware.h14
7 files changed, 950 insertions, 2 deletions
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 3d9504811126..efa24d9a3361 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -15,6 +15,12 @@ config PCI_EXYNOS
15 select PCIEPORTBUS 15 select PCIEPORTBUS
16 select PCIE_DW 16 select PCIE_DW
17 17
18config PCI_IMX6
19 bool "Freescale i.MX6 PCIe controller"
20 depends on SOC_IMX6Q
21 select PCIEPORTBUS
22 select PCIE_DW
23
18config PCI_TEGRA 24config PCI_TEGRA
19 bool "NVIDIA Tegra PCIe controller" 25 bool "NVIDIA Tegra PCIe controller"
20 depends on ARCH_TEGRA 26 depends on ARCH_TEGRA
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index c9a997b2690d..287d6a053dda 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_PCIE_DW) += pcie-designware.o 1obj-$(CONFIG_PCIE_DW) += pcie-designware.o
2obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o 2obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
3obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
3obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o 4obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
4obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o 5obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 94e096bb2d0a..ee692c2c3d73 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -48,6 +48,7 @@ struct exynos_pcie {
48#define PCIE_IRQ_SPECIAL 0x008 48#define PCIE_IRQ_SPECIAL 0x008
49#define PCIE_IRQ_EN_PULSE 0x00c 49#define PCIE_IRQ_EN_PULSE 0x00c
50#define PCIE_IRQ_EN_LEVEL 0x010 50#define PCIE_IRQ_EN_LEVEL 0x010
51#define IRQ_MSI_ENABLE (0x1 << 2)
51#define PCIE_IRQ_EN_SPECIAL 0x014 52#define PCIE_IRQ_EN_SPECIAL 0x014
52#define PCIE_PWR_RESET 0x018 53#define PCIE_PWR_RESET 0x018
53#define PCIE_CORE_RESET 0x01c 54#define PCIE_CORE_RESET 0x01c
@@ -77,18 +78,28 @@ struct exynos_pcie {
77#define PCIE_PHY_PLL_BIAS 0x00c 78#define PCIE_PHY_PLL_BIAS 0x00c
78#define PCIE_PHY_DCC_FEEDBACK 0x014 79#define PCIE_PHY_DCC_FEEDBACK 0x014
79#define PCIE_PHY_PLL_DIV_1 0x05c 80#define PCIE_PHY_PLL_DIV_1 0x05c
81#define PCIE_PHY_COMMON_POWER 0x064
82#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
80#define PCIE_PHY_TRSV0_EMP_LVL 0x084 83#define PCIE_PHY_TRSV0_EMP_LVL 0x084
81#define PCIE_PHY_TRSV0_DRV_LVL 0x088 84#define PCIE_PHY_TRSV0_DRV_LVL 0x088
82#define PCIE_PHY_TRSV0_RXCDR 0x0ac 85#define PCIE_PHY_TRSV0_RXCDR 0x0ac
86#define PCIE_PHY_TRSV0_POWER 0x0c4
87#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
83#define PCIE_PHY_TRSV0_LVCC 0x0dc 88#define PCIE_PHY_TRSV0_LVCC 0x0dc
84#define PCIE_PHY_TRSV1_EMP_LVL 0x144 89#define PCIE_PHY_TRSV1_EMP_LVL 0x144
85#define PCIE_PHY_TRSV1_RXCDR 0x16c 90#define PCIE_PHY_TRSV1_RXCDR 0x16c
91#define PCIE_PHY_TRSV1_POWER 0x184
92#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
86#define PCIE_PHY_TRSV1_LVCC 0x19c 93#define PCIE_PHY_TRSV1_LVCC 0x19c
87#define PCIE_PHY_TRSV2_EMP_LVL 0x204 94#define PCIE_PHY_TRSV2_EMP_LVL 0x204
88#define PCIE_PHY_TRSV2_RXCDR 0x22c 95#define PCIE_PHY_TRSV2_RXCDR 0x22c
96#define PCIE_PHY_TRSV2_POWER 0x244
97#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
89#define PCIE_PHY_TRSV2_LVCC 0x25c 98#define PCIE_PHY_TRSV2_LVCC 0x25c
90#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 99#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
91#define PCIE_PHY_TRSV3_RXCDR 0x2ec 100#define PCIE_PHY_TRSV3_RXCDR 0x2ec
101#define PCIE_PHY_TRSV3_POWER 0x304
102#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
92#define PCIE_PHY_TRSV3_LVCC 0x31c 103#define PCIE_PHY_TRSV3_LVCC 0x31c
93 104
94static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg) 105static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
202 exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET); 213 exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
203} 214}
204 215
216static void exynos_pcie_power_on_phy(struct pcie_port *pp)
217{
218 u32 val;
219 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
220
221 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
222 val &= ~PCIE_PHY_COMMON_PD_CMN;
223 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
224
225 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
226 val &= ~PCIE_PHY_TRSV0_PD_TSV;
227 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
228
229 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
230 val &= ~PCIE_PHY_TRSV1_PD_TSV;
231 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
232
233 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
234 val &= ~PCIE_PHY_TRSV2_PD_TSV;
235 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
236
237 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
238 val &= ~PCIE_PHY_TRSV3_PD_TSV;
239 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
240}
241
242static void exynos_pcie_power_off_phy(struct pcie_port *pp)
243{
244 u32 val;
245 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
246
247 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
248 val |= PCIE_PHY_COMMON_PD_CMN;
249 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
250
251 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
252 val |= PCIE_PHY_TRSV0_PD_TSV;
253 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
254
255 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
256 val |= PCIE_PHY_TRSV1_PD_TSV;
257 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
258
259 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
260 val |= PCIE_PHY_TRSV2_PD_TSV;
261 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
262
263 val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
264 val |= PCIE_PHY_TRSV3_PD_TSV;
265 exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
266}
267
205static void exynos_pcie_init_phy(struct pcie_port *pp) 268static void exynos_pcie_init_phy(struct pcie_port *pp)
206{ 269{
207 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); 270 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
270 /* de-assert phy reset */ 333 /* de-assert phy reset */
271 exynos_pcie_deassert_phy_reset(pp); 334 exynos_pcie_deassert_phy_reset(pp);
272 335
336 /* power on phy */
337 exynos_pcie_power_on_phy(pp);
338
273 /* initialize phy */ 339 /* initialize phy */
274 exynos_pcie_init_phy(pp); 340 exynos_pcie_init_phy(pp);
275 341
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
302 PCIE_PHY_PLL_LOCKED); 368 PCIE_PHY_PLL_LOCKED);
303 dev_info(pp->dev, "PLL Locked: 0x%x\n", val); 369 dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
304 } 370 }
371 /* power off phy */
372 exynos_pcie_power_off_phy(pp);
373
305 dev_err(pp->dev, "PCIe Link Fail\n"); 374 dev_err(pp->dev, "PCIe Link Fail\n");
306 return -EINVAL; 375 return -EINVAL;
307 } 376 }
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
342 return IRQ_HANDLED; 411 return IRQ_HANDLED;
343} 412}
344 413
414static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
415{
416 struct pcie_port *pp = arg;
417
418 dw_handle_msi_irq(pp);
419
420 return IRQ_HANDLED;
421}
422
423static void exynos_pcie_msi_init(struct pcie_port *pp)
424{
425 u32 val;
426 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
427
428 dw_pcie_msi_init(pp);
429
430 /* enable MSI interrupt */
431 val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
432 val |= IRQ_MSI_ENABLE;
433 exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
434 return;
435}
436
345static void exynos_pcie_enable_interrupts(struct pcie_port *pp) 437static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
346{ 438{
347 exynos_pcie_enable_irq_pulse(pp); 439 exynos_pcie_enable_irq_pulse(pp);
440
441 if (IS_ENABLED(CONFIG_PCI_MSI))
442 exynos_pcie_msi_init(pp);
443
348 return; 444 return;
349} 445}
350 446
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
430 return ret; 526 return ret;
431 } 527 }
432 528
529 if (IS_ENABLED(CONFIG_PCI_MSI)) {
530 pp->msi_irq = platform_get_irq(pdev, 0);
531 if (!pp->msi_irq) {
532 dev_err(&pdev->dev, "failed to get msi irq\n");
533 return -ENODEV;
534 }
535
536 ret = devm_request_irq(&pdev->dev, pp->msi_irq,
537 exynos_pcie_msi_irq_handler,
538 IRQF_SHARED, "exynos-pcie", pp);
539 if (ret) {
540 dev_err(&pdev->dev, "failed to request msi irq\n");
541 return ret;
542 }
543 }
544
433 pp->root_bus_nr = -1; 545 pp->root_bus_nr = -1;
434 pp->ops = &exynos_pcie_host_ops; 546 pp->ops = &exynos_pcie_host_ops;
435 547
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 000000000000..5afa9226a080
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,575 @@
1/*
2 * PCIe host controller driver for Freescale i.MX6 SoCs
3 *
4 * Copyright (C) 2013 Kosagi
5 * http://www.kosagi.com
6 *
7 * Author: Sean Cross <xobs@kosagi.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/gpio.h>
17#include <linux/kernel.h>
18#include <linux/mfd/syscon.h>
19#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
20#include <linux/module.h>
21#include <linux/of_gpio.h>
22#include <linux/pci.h>
23#include <linux/platform_device.h>
24#include <linux/regmap.h>
25#include <linux/resource.h>
26#include <linux/signal.h>
27#include <linux/types.h>
28
29#include "pcie-designware.h"
30
31#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
32
33struct imx6_pcie {
34 int reset_gpio;
35 int power_on_gpio;
36 int wake_up_gpio;
37 int disable_gpio;
38 struct clk *lvds_gate;
39 struct clk *sata_ref_100m;
40 struct clk *pcie_ref_125m;
41 struct clk *pcie_axi;
42 struct pcie_port pp;
43 struct regmap *iomuxc_gpr;
44 void __iomem *mem_base;
45};
46
47/* PCIe Port Logic registers (memory-mapped) */
48#define PL_OFFSET 0x700
49#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
50#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
51
52#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
53#define PCIE_PHY_CTRL_DATA_LOC 0
54#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
55#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
56#define PCIE_PHY_CTRL_WR_LOC 18
57#define PCIE_PHY_CTRL_RD_LOC 19
58
59#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
60#define PCIE_PHY_STAT_ACK_LOC 16
61
62/* PHY registers (not memory-mapped) */
63#define PCIE_PHY_RX_ASIC_OUT 0x100D
64
65#define PHY_RX_OVRD_IN_LO 0x1005
66#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
67#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
68
69static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
70{
71 u32 val;
72 u32 max_iterations = 10;
73 u32 wait_counter = 0;
74
75 do {
76 val = readl(dbi_base + PCIE_PHY_STAT);
77 val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
78 wait_counter++;
79
80 if (val == exp_val)
81 return 0;
82
83 udelay(1);
84 } while (wait_counter < max_iterations);
85
86 return -ETIMEDOUT;
87}
88
89static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
90{
91 u32 val;
92 int ret;
93
94 val = addr << PCIE_PHY_CTRL_DATA_LOC;
95 writel(val, dbi_base + PCIE_PHY_CTRL);
96
97 val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
98 writel(val, dbi_base + PCIE_PHY_CTRL);
99
100 ret = pcie_phy_poll_ack(dbi_base, 1);
101 if (ret)
102 return ret;
103
104 val = addr << PCIE_PHY_CTRL_DATA_LOC;
105 writel(val, dbi_base + PCIE_PHY_CTRL);
106
107 ret = pcie_phy_poll_ack(dbi_base, 0);
108 if (ret)
109 return ret;
110
111 return 0;
112}
113
114/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
115static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data)
116{
117 u32 val, phy_ctl;
118 int ret;
119
120 ret = pcie_phy_wait_ack(dbi_base, addr);
121 if (ret)
122 return ret;
123
124 /* assert Read signal */
125 phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
126 writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
127
128 ret = pcie_phy_poll_ack(dbi_base, 1);
129 if (ret)
130 return ret;
131
132 val = readl(dbi_base + PCIE_PHY_STAT);
133 *data = val & 0xffff;
134
135 /* deassert Read signal */
136 writel(0x00, dbi_base + PCIE_PHY_CTRL);
137
138 ret = pcie_phy_poll_ack(dbi_base, 0);
139 if (ret)
140 return ret;
141
142 return 0;
143}
144
145static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
146{
147 u32 var;
148 int ret;
149
150 /* write addr */
151 /* cap addr */
152 ret = pcie_phy_wait_ack(dbi_base, addr);
153 if (ret)
154 return ret;
155
156 var = data << PCIE_PHY_CTRL_DATA_LOC;
157 writel(var, dbi_base + PCIE_PHY_CTRL);
158
159 /* capture data */
160 var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
161 writel(var, dbi_base + PCIE_PHY_CTRL);
162
163 ret = pcie_phy_poll_ack(dbi_base, 1);
164 if (ret)
165 return ret;
166
167 /* deassert cap data */
168 var = data << PCIE_PHY_CTRL_DATA_LOC;
169 writel(var, dbi_base + PCIE_PHY_CTRL);
170
171 /* wait for ack de-assertion */
172 ret = pcie_phy_poll_ack(dbi_base, 0);
173 if (ret)
174 return ret;
175
176 /* assert wr signal */
177 var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
178 writel(var, dbi_base + PCIE_PHY_CTRL);
179
180 /* wait for ack */
181 ret = pcie_phy_poll_ack(dbi_base, 1);
182 if (ret)
183 return ret;
184
185 /* deassert wr signal */
186 var = data << PCIE_PHY_CTRL_DATA_LOC;
187 writel(var, dbi_base + PCIE_PHY_CTRL);
188
189 /* wait for ack de-assertion */
190 ret = pcie_phy_poll_ack(dbi_base, 0);
191 if (ret)
192 return ret;
193
194 writel(0x0, dbi_base + PCIE_PHY_CTRL);
195
196 return 0;
197}
198
199/* Added for PCI abort handling */
200static int imx6q_pcie_abort_handler(unsigned long addr,
201 unsigned int fsr, struct pt_regs *regs)
202{
203 /*
204 * If it was an imprecise abort, then we need to correct the
205 * return address to be _after_ the instruction.
206 */
207 if (fsr & (1 << 10))
208 regs->ARM_pc += 4;
209 return 0;
210}
211
212static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
213{
214 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
215
216 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
217 IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
218 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
219 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
220 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
221 IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
222
223 gpio_set_value(imx6_pcie->reset_gpio, 0);
224 msleep(100);
225 gpio_set_value(imx6_pcie->reset_gpio, 1);
226
227 return 0;
228}
229
230static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
231{
232 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
233 int ret;
234
235 if (gpio_is_valid(imx6_pcie->power_on_gpio))
236 gpio_set_value(imx6_pcie->power_on_gpio, 1);
237
238 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
239 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
240 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
241 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
242
243 ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
244 if (ret) {
245 dev_err(pp->dev, "unable to enable sata_ref_100m\n");
246 goto err_sata_ref;
247 }
248
249 ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
250 if (ret) {
251 dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
252 goto err_pcie_ref;
253 }
254
255 ret = clk_prepare_enable(imx6_pcie->lvds_gate);
256 if (ret) {
257 dev_err(pp->dev, "unable to enable lvds_gate\n");
258 goto err_lvds_gate;
259 }
260
261 ret = clk_prepare_enable(imx6_pcie->pcie_axi);
262 if (ret) {
263 dev_err(pp->dev, "unable to enable pcie_axi\n");
264 goto err_pcie_axi;
265 }
266
267 /* allow the clocks to stabilize */
268 usleep_range(200, 500);
269
270 return 0;
271
272err_pcie_axi:
273 clk_disable_unprepare(imx6_pcie->lvds_gate);
274err_lvds_gate:
275 clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
276err_pcie_ref:
277 clk_disable_unprepare(imx6_pcie->sata_ref_100m);
278err_sata_ref:
279 return ret;
280
281}
282
283static void imx6_pcie_init_phy(struct pcie_port *pp)
284{
285 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
286
287 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
288 IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
289
290 /* configure constant input signal to the pcie ctrl and phy */
291 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
292 IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
293 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
294 IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
295
296 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
297 IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
298 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
299 IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
300 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
301 IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
302 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
303 IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
304 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
305 IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
306}
307
308static void imx6_pcie_host_init(struct pcie_port *pp)
309{
310 int count = 0;
311 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
312
313 imx6_pcie_assert_core_reset(pp);
314
315 imx6_pcie_init_phy(pp);
316
317 imx6_pcie_deassert_core_reset(pp);
318
319 dw_pcie_setup_rc(pp);
320
321 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
322 IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
323
324 while (!dw_pcie_link_up(pp)) {
325 usleep_range(100, 1000);
326 count++;
327 if (count >= 10) {
328 dev_err(pp->dev, "phy link never came up\n");
329 dev_dbg(pp->dev,
330 "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
331 readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
332 readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
333 break;
334 }
335 }
336
337 return;
338}
339
340static int imx6_pcie_link_up(struct pcie_port *pp)
341{
342 u32 rc, ltssm, rx_valid, temp;
343
344 /* link is debug bit 36, debug register 1 starts at bit 32 */
345 rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
346 if (rc)
347 return -EAGAIN;
348
349 /*
350 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
351 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
352 * If (MAC/LTSSM.state == Recovery.RcvrLock)
353 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
354 * to gen2 is stuck
355 */
356 pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
357 ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
358
359 if (rx_valid & 0x01)
360 return 0;
361
362 if (ltssm != 0x0d)
363 return 0;
364
365 dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
366
367 pcie_phy_read(pp->dbi_base,
368 PHY_RX_OVRD_IN_LO, &temp);
369 temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
370 | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
371 pcie_phy_write(pp->dbi_base,
372 PHY_RX_OVRD_IN_LO, temp);
373
374 usleep_range(2000, 3000);
375
376 pcie_phy_read(pp->dbi_base,
377 PHY_RX_OVRD_IN_LO, &temp);
378 temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
379 | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
380 pcie_phy_write(pp->dbi_base,
381 PHY_RX_OVRD_IN_LO, temp);
382
383 return 0;
384}
385
386static struct pcie_host_ops imx6_pcie_host_ops = {
387 .link_up = imx6_pcie_link_up,
388 .host_init = imx6_pcie_host_init,
389};
390
391static int imx6_add_pcie_port(struct pcie_port *pp,
392 struct platform_device *pdev)
393{
394 int ret;
395
396 pp->irq = platform_get_irq(pdev, 0);
397 if (!pp->irq) {
398 dev_err(&pdev->dev, "failed to get irq\n");
399 return -ENODEV;
400 }
401
402 pp->root_bus_nr = -1;
403 pp->ops = &imx6_pcie_host_ops;
404
405 spin_lock_init(&pp->conf_lock);
406 ret = dw_pcie_host_init(pp);
407 if (ret) {
408 dev_err(&pdev->dev, "failed to initialize host\n");
409 return ret;
410 }
411
412 return 0;
413}
414
415static int __init imx6_pcie_probe(struct platform_device *pdev)
416{
417 struct imx6_pcie *imx6_pcie;
418 struct pcie_port *pp;
419 struct device_node *np = pdev->dev.of_node;
420 struct resource *dbi_base;
421 int ret;
422
423 imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
424 if (!imx6_pcie)
425 return -ENOMEM;
426
427 pp = &imx6_pcie->pp;
428 pp->dev = &pdev->dev;
429
430 /* Added for PCI abort handling */
431 hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
432 "imprecise external abort");
433
434 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
435 if (!dbi_base) {
436 dev_err(&pdev->dev, "dbi_base memory resource not found\n");
437 return -ENODEV;
438 }
439
440 pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
441 if (IS_ERR(pp->dbi_base)) {
442 dev_err(&pdev->dev, "unable to remap dbi_base\n");
443 ret = PTR_ERR(pp->dbi_base);
444 goto err;
445 }
446
447 /* Fetch GPIOs */
448 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
449 if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
450 dev_err(&pdev->dev, "no reset-gpio defined\n");
451 ret = -ENODEV;
452 }
453 ret = devm_gpio_request_one(&pdev->dev,
454 imx6_pcie->reset_gpio,
455 GPIOF_OUT_INIT_LOW,
456 "PCIe reset");
457 if (ret) {
458 dev_err(&pdev->dev, "unable to get reset gpio\n");
459 goto err;
460 }
461
462 imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
463 if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
464 ret = devm_gpio_request_one(&pdev->dev,
465 imx6_pcie->power_on_gpio,
466 GPIOF_OUT_INIT_LOW,
467 "PCIe power enable");
468 if (ret) {
469 dev_err(&pdev->dev, "unable to get power-on gpio\n");
470 goto err;
471 }
472 }
473
474 imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
475 if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
476 ret = devm_gpio_request_one(&pdev->dev,
477 imx6_pcie->wake_up_gpio,
478 GPIOF_IN,
479 "PCIe wake up");
480 if (ret) {
481 dev_err(&pdev->dev, "unable to get wake-up gpio\n");
482 goto err;
483 }
484 }
485
486 imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
487 if (gpio_is_valid(imx6_pcie->disable_gpio)) {
488 ret = devm_gpio_request_one(&pdev->dev,
489 imx6_pcie->disable_gpio,
490 GPIOF_OUT_INIT_HIGH,
491 "PCIe disable endpoint");
492 if (ret) {
493 dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
494 goto err;
495 }
496 }
497
498 /* Fetch clocks */
499 imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
500 if (IS_ERR(imx6_pcie->lvds_gate)) {
501 dev_err(&pdev->dev,
502 "lvds_gate clock select missing or invalid\n");
503 ret = PTR_ERR(imx6_pcie->lvds_gate);
504 goto err;
505 }
506
507 imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
508 if (IS_ERR(imx6_pcie->sata_ref_100m)) {
509 dev_err(&pdev->dev,
510 "sata_ref_100m clock source missing or invalid\n");
511 ret = PTR_ERR(imx6_pcie->sata_ref_100m);
512 goto err;
513 }
514
515 imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
516 if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
517 dev_err(&pdev->dev,
518 "pcie_ref_125m clock source missing or invalid\n");
519 ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
520 goto err;
521 }
522
523 imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
524 if (IS_ERR(imx6_pcie->pcie_axi)) {
525 dev_err(&pdev->dev,
526 "pcie_axi clock source missing or invalid\n");
527 ret = PTR_ERR(imx6_pcie->pcie_axi);
528 goto err;
529 }
530
531 /* Grab GPR config register range */
532 imx6_pcie->iomuxc_gpr =
533 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
534 if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
535 dev_err(&pdev->dev, "unable to find iomuxc registers\n");
536 ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
537 goto err;
538 }
539
540 ret = imx6_add_pcie_port(pp, pdev);
541 if (ret < 0)
542 goto err;
543
544 platform_set_drvdata(pdev, imx6_pcie);
545 return 0;
546
547err:
548 return ret;
549}
550
551static const struct of_device_id imx6_pcie_of_match[] = {
552 { .compatible = "fsl,imx6q-pcie", },
553 {},
554};
555MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
556
557static struct platform_driver imx6_pcie_driver = {
558 .driver = {
559 .name = "imx6q-pcie",
560 .owner = THIS_MODULE,
561 .of_match_table = of_match_ptr(imx6_pcie_of_match),
562 },
563};
564
565/* Freescale PCIe driver does not allow module unload */
566
567static int __init imx6_pcie_init(void)
568{
569 return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
570}
571module_init(imx6_pcie_init);
572
573MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
574MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
575MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 2e9888a0635a..7c4f38dd42ba 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -408,7 +408,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
408 408
409 list_for_each_entry(bus, &pcie->busses, list) 409 list_for_each_entry(bus, &pcie->busses, list)
410 if (bus->nr == busnr) 410 if (bus->nr == busnr)
411 return bus->area->addr; 411 return (void __iomem *)bus->area->addr;
412 412
413 bus = tegra_pcie_bus_alloc(pcie, busnr); 413 bus = tegra_pcie_bus_alloc(pcie, busnr);
414 if (IS_ERR(bus)) 414 if (IS_ERR(bus))
@@ -416,7 +416,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
416 416
417 list_add_tail(&bus->list, &pcie->busses); 417 list_add_tail(&bus->list, &pcie->busses);
418 418
419 return bus->area->addr; 419 return (void __iomem *)bus->area->addr;
420} 420}
421 421
422static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus, 422static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c10e9ac9bbbc..896301788e9d 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -11,8 +11,11 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/irq.h>
15#include <linux/irqdomain.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/msi.h>
16#include <linux/of_address.h> 19#include <linux/of_address.h>
17#include <linux/pci.h> 20#include <linux/pci.h>
18#include <linux/pci_regs.h> 21#include <linux/pci_regs.h>
@@ -142,6 +145,204 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
142 return ret; 145 return ret;
143} 146}
144 147
148static struct irq_chip dw_msi_irq_chip = {
149 .name = "PCI-MSI",
150 .irq_enable = unmask_msi_irq,
151 .irq_disable = mask_msi_irq,
152 .irq_mask = mask_msi_irq,
153 .irq_unmask = unmask_msi_irq,
154};
155
156/* MSI int handler */
157void dw_handle_msi_irq(struct pcie_port *pp)
158{
159 unsigned long val;
160 int i, pos;
161
162 for (i = 0; i < MAX_MSI_CTRLS; i++) {
163 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
164 (u32 *)&val);
165 if (val) {
166 pos = 0;
167 while ((pos = find_next_bit(&val, 32, pos)) != 32) {
168 generic_handle_irq(pp->msi_irq_start
169 + (i * 32) + pos);
170 pos++;
171 }
172 }
173 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
174 }
175}
176
177void dw_pcie_msi_init(struct pcie_port *pp)
178{
179 pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
180
181 /* program the msi_data */
182 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
183 virt_to_phys((void *)pp->msi_data));
184 dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
185}
186
187static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
188{
189 int flag = 1;
190
191 do {
192 pos = find_next_zero_bit(pp->msi_irq_in_use,
193 MAX_MSI_IRQS, pos);
194 /*if you have reached to the end then get out from here.*/
195 if (pos == MAX_MSI_IRQS)
196 return -ENOSPC;
197 /*
198 * Check if this position is at correct offset.nvec is always a
199 * power of two. pos0 must be nvec bit alligned.
200 */
201 if (pos % msgvec)
202 pos += msgvec - (pos % msgvec);
203 else
204 flag = 0;
205 } while (flag);
206
207 *pos0 = pos;
208 return 0;
209}
210
211static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
212{
213 int res, bit, irq, pos0, pos1, i;
214 u32 val;
215 struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
216
217 if (!pp) {
218 BUG();
219 return -EINVAL;
220 }
221
222 pos0 = find_first_zero_bit(pp->msi_irq_in_use,
223 MAX_MSI_IRQS);
224 if (pos0 % no_irqs) {
225 if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
226 goto no_valid_irq;
227 }
228 if (no_irqs > 1) {
229 pos1 = find_next_bit(pp->msi_irq_in_use,
230 MAX_MSI_IRQS, pos0);
231 /* there must be nvec number of consecutive free bits */
232 while ((pos1 - pos0) < no_irqs) {
233 if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
234 goto no_valid_irq;
235 pos1 = find_next_bit(pp->msi_irq_in_use,
236 MAX_MSI_IRQS, pos0);
237 }
238 }
239
240 irq = (pp->msi_irq_start + pos0);
241
242 if ((irq + no_irqs) > (pp->msi_irq_start + MAX_MSI_IRQS-1))
243 goto no_valid_irq;
244
245 i = 0;
246 while (i < no_irqs) {
247 set_bit(pos0 + i, pp->msi_irq_in_use);
248 irq_alloc_descs((irq + i), (irq + i), 1, 0);
249 irq_set_msi_desc(irq + i, desc);
250 /*Enable corresponding interrupt in MSI interrupt controller */
251 res = ((pos0 + i) / 32) * 12;
252 bit = (pos0 + i) % 32;
253 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
254 val |= 1 << bit;
255 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
256 i++;
257 }
258
259 *pos = pos0;
260 return irq;
261
262no_valid_irq:
263 *pos = pos0;
264 return -ENOSPC;
265}
266
267static void clear_irq(unsigned int irq)
268{
269 int res, bit, val, pos;
270 struct irq_desc *desc;
271 struct msi_desc *msi;
272 struct pcie_port *pp;
273
274 /* get the port structure */
275 desc = irq_to_desc(irq);
276 msi = irq_desc_get_msi_desc(desc);
277 pp = sys_to_pcie(msi->dev->bus->sysdata);
278 if (!pp) {
279 BUG();
280 return;
281 }
282
283 pos = irq - pp->msi_irq_start;
284
285 irq_free_desc(irq);
286
287 clear_bit(pos, pp->msi_irq_in_use);
288
289 /* Disable corresponding interrupt on MSI interrupt controller */
290 res = (pos / 32) * 12;
291 bit = pos % 32;
292 dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
293 val &= ~(1 << bit);
294 dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
295}
296
297static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
298 struct msi_desc *desc)
299{
300 int irq, pos, msgvec;
301 u16 msg_ctr;
302 struct msi_msg msg;
303 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
304
305 if (!pp) {
306 BUG();
307 return -EINVAL;
308 }
309
310 pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
311 &msg_ctr);
312 msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
313 if (msgvec == 0)
314 msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
315 if (msgvec > 5)
316 msgvec = 0;
317
318 irq = assign_irq((1 << msgvec), desc, &pos);
319 if (irq < 0)
320 return irq;
321
322 msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
323 msg_ctr |= msgvec << 4;
324 pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
325 msg_ctr);
326 desc->msi_attrib.multiple = msgvec;
327
328 msg.address_lo = virt_to_phys((void *)pp->msi_data);
329 msg.address_hi = 0x0;
330 msg.data = pos;
331 write_msi_msg(irq, &msg);
332
333 return 0;
334}
335
336static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
337{
338 clear_irq(irq);
339}
340
341static struct msi_chip dw_pcie_msi_chip = {
342 .setup_irq = dw_msi_setup_irq,
343 .teardown_irq = dw_msi_teardown_irq,
344};
345
145int dw_pcie_link_up(struct pcie_port *pp) 346int dw_pcie_link_up(struct pcie_port *pp)
146{ 347{
147 if (pp->ops->link_up) 348 if (pp->ops->link_up)
@@ -150,6 +351,20 @@ int dw_pcie_link_up(struct pcie_port *pp)
150 return 0; 351 return 0;
151} 352}
152 353
354static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
355 irq_hw_number_t hwirq)
356{
357 irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
358 irq_set_chip_data(irq, domain->host_data);
359 set_irq_flags(irq, IRQF_VALID);
360
361 return 0;
362}
363
/* Linear MSI irq_domain: only a .map callback is required. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
367
153int __init dw_pcie_host_init(struct pcie_port *pp) 368int __init dw_pcie_host_init(struct pcie_port *pp)
154{ 369{
155 struct device_node *np = pp->dev->of_node; 370 struct device_node *np = pp->dev->of_node;
@@ -157,6 +372,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
157 struct of_pci_range_parser parser; 372 struct of_pci_range_parser parser;
158 u32 val; 373 u32 val;
159 374
375 struct irq_domain *irq_domain;
376
160 if (of_pci_range_parser_init(&parser, np)) { 377 if (of_pci_range_parser_init(&parser, np)) {
161 dev_err(pp->dev, "missing ranges property\n"); 378 dev_err(pp->dev, "missing ranges property\n");
162 return -EINVAL; 379 return -EINVAL;
@@ -223,6 +440,18 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
223 return -EINVAL; 440 return -EINVAL;
224 } 441 }
225 442
443 if (IS_ENABLED(CONFIG_PCI_MSI)) {
444 irq_domain = irq_domain_add_linear(pp->dev->of_node,
445 MAX_MSI_IRQS, &msi_domain_ops,
446 &dw_pcie_msi_chip);
447 if (!irq_domain) {
448 dev_err(pp->dev, "irq domain init failed\n");
449 return -ENXIO;
450 }
451
452 pp->msi_irq_start = irq_find_mapping(irq_domain, 0);
453 }
454
226 if (pp->ops->host_init) 455 if (pp->ops->host_init)
227 pp->ops->host_init(pp); 456 pp->ops->host_init(pp);
228 457
@@ -485,10 +714,21 @@ int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
485 return pp->irq; 714 return pp->irq;
486} 715}
487 716
/*
 * hw_pci .add_bus hook: attach the MSI chip to each scanned bus so the
 * PCI core routes MSI setup/teardown through this driver.
 */
static void dw_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct pcie_port *pp = sys_to_pcie(bus->sysdata);

		/* NOTE: dw_pcie_msi_chip is a single global; the last bus
		 * scanned sets its ->dev. Fine for one port per chip. */
		dw_pcie_msi_chip.dev = pp->dev;
		bus->msi = &dw_pcie_msi_chip;
	}
}
726
/* Host controller callbacks registered with the ARM PCI core (hw_pci). */
static struct hw_pci dw_pci = {
	.setup = dw_pcie_setup,
	.scan = dw_pcie_scan_bus,
	.map_irq = dw_pcie_map_irq,
	.add_bus = dw_pcie_add_bus,
};
493 733
494void dw_pcie_setup_rc(struct pcie_port *pp) 734void dw_pcie_setup_rc(struct pcie_port *pp)
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 133820f1da97..faccbbf31907 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -20,6 +20,14 @@ struct pcie_port_info {
20 phys_addr_t mem_bus_addr; 20 phys_addr_t mem_bus_addr;
21}; 21};
22 22
/*
 * Up to 256 MSI IRQs are possible per controller, but keep it at 32 for
 * now; more are unlikely to be needed.  If they are, increase this in
 * multiples of 32 (the controller groups vectors into banks of 32, one
 * enable/status register set per bank — hence MAX_MSI_CTRLS).
 */
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
30
23struct pcie_port { 31struct pcie_port {
24 struct device *dev; 32 struct device *dev;
25 u8 root_bus_nr; 33 u8 root_bus_nr;
@@ -38,6 +46,10 @@ struct pcie_port {
38 int irq; 46 int irq;
39 u32 lanes; 47 u32 lanes;
40 struct pcie_host_ops *ops; 48 struct pcie_host_ops *ops;
49 int msi_irq;
50 int msi_irq_start;
51 unsigned long msi_data;
52 DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
41}; 53};
42 54
43struct pcie_host_ops { 55struct pcie_host_ops {
@@ -57,6 +69,8 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val);
57int cfg_write(void __iomem *addr, int where, int size, u32 val); 69int cfg_write(void __iomem *addr, int where, int size, u32 val);
58int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val); 70int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
59int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val); 71int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
72void dw_handle_msi_irq(struct pcie_port *pp);
73void dw_pcie_msi_init(struct pcie_port *pp);
60int dw_pcie_link_up(struct pcie_port *pp); 74int dw_pcie_link_up(struct pcie_port *pp);
61void dw_pcie_setup_rc(struct pcie_port *pp); 75void dw_pcie_setup_rc(struct pcie_port *pp);
62int dw_pcie_host_init(struct pcie_port *pp); 76int dw_pcie_host_init(struct pcie_port *pp);