author     Jingoo Han <jg1.han@samsung.com>        2013-07-31 04:14:10 -0400
committer  Bjorn Helgaas <bhelgaas@google.com>     2013-08-12 14:18:20 -0400
commit     4b1ced841b2e31470ae4bb47988891754ce4d8c7 (patch)
tree       8fdf59944d73d9b946922e9d3c42239acce6aa8f /drivers/pci
parent     5477a33b51b7282aca731213dc592b5f0c4e7c13 (diff)
PCI: exynos: Split into Synopsys part and Exynos part
The Exynos PCIe IP consists of a Synopsys-specific part and an
Exynos-specific part. Only the core block is Synopsys Designware IP;
the other parts are Exynos-specific.
Since the Synopsys Designware part can be shared with other platforms,
split the driver into two parts: the common Synopsys Designware part
and the Exynos-specific part.
Signed-off-by: Jingoo Han <jg1.han@samsung.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Cc: Pratyush Anand <pratyush.anand@st.com>
Cc: Mohit KUMAR <Mohit.KUMAR@st.com>
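
To illustrate the shape of the split (this sketch is not part of the patch): after this change a SoC driver embeds the shared struct pcie_port, fills in a struct pcie_host_ops with its platform-specific callbacks, and hands control to dw_pcie_host_init(). The foo_* names and the app_base register block below are invented for illustration; struct pcie_port, struct pcie_host_ops, dw_pcie_setup_rc() and dw_pcie_host_init() are the interfaces introduced by pcie-designware.{c,h} in this patch, and pci-exynos.c below is the real first user.

/*
 * Illustrative sketch only -- not part of this commit.  "foo" stands in
 * for any other SoC that wants to reuse the shared DesignWare core; the
 * real first user is pci-exynos.c below.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

struct foo_pcie {
	void __iomem	*app_base;	/* hypothetical SoC-specific registers */
	struct pcie_port pp;		/* shared DesignWare port state */
};

#define to_foo_pcie(x)	container_of(x, struct foo_pcie, pp)

static int foo_pcie_link_up(struct pcie_port *pp)
{
	/* read the SoC-specific link-up status register here */
	return 0;
}

static void foo_pcie_host_init(struct pcie_port *pp)
{
	/* SoC-specific PHY setup and link training would go here, then: */
	dw_pcie_setup_rc(pp);
}

static struct pcie_host_ops foo_pcie_host_ops = {
	.link_up	= foo_pcie_link_up,
	.host_init	= foo_pcie_host_init,
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct foo_pcie *foo;
	struct pcie_port *pp;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	pp = &foo->pp;
	pp->dev = &pdev->dev;
	pp->root_bus_nr = -1;
	pp->ops = &foo_pcie_host_ops;
	spin_lock_init(&pp->conf_lock);

	/* the common code maps DT ranges and registers the host bridge */
	return dw_pcie_host_init(pp);
}

With this layout, DBI access quirks, PHY bring-up, and reset sequencing stay in the platform file, while ATU programming, config-space accessors, and host bridge registration live once in pcie-designware.c.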
Diffstat (limited to 'drivers/pci')
-rw-r--r--   drivers/pci/host/Makefile          |    3
-rw-r--r--   drivers/pci/host/pci-exynos.c      |  530
-rw-r--r--   drivers/pci/host/pcie-designware.c | 1011
-rw-r--r--   drivers/pci/host/pcie-designware.h |   65
4 files changed, 869 insertions, 740 deletions
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 086d8500e849..ab79ccb5bbff 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644
index 000000000000..012ca8aec71a
--- /dev/null
+++ b/drivers/pci/host/pci-exynos.c
@@ -0,0 +1,530 @@
1 | /* | ||
2 | * PCIe host controller driver for Samsung EXYNOS SoCs | ||
3 | * | ||
4 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
5 | * http://www.samsung.com | ||
6 | * | ||
7 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/gpio.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of_gpio.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/resource.h> | ||
24 | #include <linux/signal.h> | ||
25 | #include <linux/types.h> | ||
26 | |||
27 | #include "pcie-designware.h" | ||
28 | |||
29 | #define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp) | ||
30 | |||
31 | struct exynos_pcie { | ||
32 | void __iomem *elbi_base; | ||
33 | void __iomem *phy_base; | ||
34 | void __iomem *block_base; | ||
35 | int reset_gpio; | ||
36 | struct clk *clk; | ||
37 | struct clk *bus_clk; | ||
38 | struct pcie_port pp; | ||
39 | }; | ||
40 | |||
41 | /* PCIe ELBI registers */ | ||
42 | #define PCIE_IRQ_PULSE 0x000 | ||
43 | #define IRQ_INTA_ASSERT (0x1 << 0) | ||
44 | #define IRQ_INTB_ASSERT (0x1 << 2) | ||
45 | #define IRQ_INTC_ASSERT (0x1 << 4) | ||
46 | #define IRQ_INTD_ASSERT (0x1 << 6) | ||
47 | #define PCIE_IRQ_LEVEL 0x004 | ||
48 | #define PCIE_IRQ_SPECIAL 0x008 | ||
49 | #define PCIE_IRQ_EN_PULSE 0x00c | ||
50 | #define PCIE_IRQ_EN_LEVEL 0x010 | ||
51 | #define PCIE_IRQ_EN_SPECIAL 0x014 | ||
52 | #define PCIE_PWR_RESET 0x018 | ||
53 | #define PCIE_CORE_RESET 0x01c | ||
54 | #define PCIE_CORE_RESET_ENABLE (0x1 << 0) | ||
55 | #define PCIE_STICKY_RESET 0x020 | ||
56 | #define PCIE_NONSTICKY_RESET 0x024 | ||
57 | #define PCIE_APP_INIT_RESET 0x028 | ||
58 | #define PCIE_APP_LTSSM_ENABLE 0x02c | ||
59 | #define PCIE_ELBI_RDLH_LINKUP 0x064 | ||
60 | #define PCIE_ELBI_LTSSM_ENABLE 0x1 | ||
61 | #define PCIE_ELBI_SLV_AWMISC 0x11c | ||
62 | #define PCIE_ELBI_SLV_ARMISC 0x120 | ||
63 | #define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) | ||
64 | |||
65 | /* PCIe Purple registers */ | ||
66 | #define PCIE_PHY_GLOBAL_RESET 0x000 | ||
67 | #define PCIE_PHY_COMMON_RESET 0x004 | ||
68 | #define PCIE_PHY_CMN_REG 0x008 | ||
69 | #define PCIE_PHY_MAC_RESET 0x00c | ||
70 | #define PCIE_PHY_PLL_LOCKED 0x010 | ||
71 | #define PCIE_PHY_TRSVREG_RESET 0x020 | ||
72 | #define PCIE_PHY_TRSV_RESET 0x024 | ||
73 | |||
74 | /* PCIe PHY registers */ | ||
75 | #define PCIE_PHY_IMPEDANCE 0x004 | ||
76 | #define PCIE_PHY_PLL_DIV_0 0x008 | ||
77 | #define PCIE_PHY_PLL_BIAS 0x00c | ||
78 | #define PCIE_PHY_DCC_FEEDBACK 0x014 | ||
79 | #define PCIE_PHY_PLL_DIV_1 0x05c | ||
80 | #define PCIE_PHY_TRSV0_EMP_LVL 0x084 | ||
81 | #define PCIE_PHY_TRSV0_DRV_LVL 0x088 | ||
82 | #define PCIE_PHY_TRSV0_RXCDR 0x0ac | ||
83 | #define PCIE_PHY_TRSV0_LVCC 0x0dc | ||
84 | #define PCIE_PHY_TRSV1_EMP_LVL 0x144 | ||
85 | #define PCIE_PHY_TRSV1_RXCDR 0x16c | ||
86 | #define PCIE_PHY_TRSV1_LVCC 0x19c | ||
87 | #define PCIE_PHY_TRSV2_EMP_LVL 0x204 | ||
88 | #define PCIE_PHY_TRSV2_RXCDR 0x22c | ||
89 | #define PCIE_PHY_TRSV2_LVCC 0x25c | ||
90 | #define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 | ||
91 | #define PCIE_PHY_TRSV3_RXCDR 0x2ec | ||
92 | #define PCIE_PHY_TRSV3_LVCC 0x31c | ||
93 | |||
94 | static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) | ||
95 | { | ||
96 | u32 val; | ||
97 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
98 | |||
99 | if (on) { | ||
100 | val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
101 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
102 | writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
103 | } else { | ||
104 | val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
105 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
106 | writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) | ||
111 | { | ||
112 | u32 val; | ||
113 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
114 | |||
115 | if (on) { | ||
116 | val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
117 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
118 | writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
119 | } else { | ||
120 | val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
121 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
122 | writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static void exynos_pcie_assert_core_reset(struct pcie_port *pp) | ||
127 | { | ||
128 | u32 val; | ||
129 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
130 | void __iomem *elbi_base = exynos_pcie->elbi_base; | ||
131 | |||
132 | val = readl(elbi_base + PCIE_CORE_RESET); | ||
133 | val &= ~PCIE_CORE_RESET_ENABLE; | ||
134 | writel(val, elbi_base + PCIE_CORE_RESET); | ||
135 | writel(0, elbi_base + PCIE_PWR_RESET); | ||
136 | writel(0, elbi_base + PCIE_STICKY_RESET); | ||
137 | writel(0, elbi_base + PCIE_NONSTICKY_RESET); | ||
138 | } | ||
139 | |||
140 | static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) | ||
141 | { | ||
142 | u32 val; | ||
143 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
144 | void __iomem *elbi_base = exynos_pcie->elbi_base; | ||
145 | void __iomem *block_base = exynos_pcie->block_base; | ||
146 | |||
147 | val = readl(elbi_base + PCIE_CORE_RESET); | ||
148 | val |= PCIE_CORE_RESET_ENABLE; | ||
149 | writel(val, elbi_base + PCIE_CORE_RESET); | ||
150 | writel(1, elbi_base + PCIE_STICKY_RESET); | ||
151 | writel(1, elbi_base + PCIE_NONSTICKY_RESET); | ||
152 | writel(1, elbi_base + PCIE_APP_INIT_RESET); | ||
153 | writel(0, elbi_base + PCIE_APP_INIT_RESET); | ||
154 | writel(1, block_base + PCIE_PHY_MAC_RESET); | ||
155 | } | ||
156 | |||
157 | static void exynos_pcie_assert_phy_reset(struct pcie_port *pp) | ||
158 | { | ||
159 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
160 | void __iomem *block_base = exynos_pcie->block_base; | ||
161 | |||
162 | writel(0, block_base + PCIE_PHY_MAC_RESET); | ||
163 | writel(1, block_base + PCIE_PHY_GLOBAL_RESET); | ||
164 | } | ||
165 | |||
166 | static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) | ||
167 | { | ||
168 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
169 | void __iomem *elbi_base = exynos_pcie->elbi_base; | ||
170 | void __iomem *block_base = exynos_pcie->block_base; | ||
171 | |||
172 | writel(0, block_base + PCIE_PHY_GLOBAL_RESET); | ||
173 | writel(1, elbi_base + PCIE_PWR_RESET); | ||
174 | writel(0, block_base + PCIE_PHY_COMMON_RESET); | ||
175 | writel(0, block_base + PCIE_PHY_CMN_REG); | ||
176 | writel(0, block_base + PCIE_PHY_TRSVREG_RESET); | ||
177 | writel(0, block_base + PCIE_PHY_TRSV_RESET); | ||
178 | } | ||
179 | |||
180 | static void exynos_pcie_init_phy(struct pcie_port *pp) | ||
181 | { | ||
182 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
183 | void __iomem *phy_base = exynos_pcie->phy_base; | ||
184 | |||
185 | /* DCC feedback control off */ | ||
186 | writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK); | ||
187 | |||
188 | /* set TX/RX impedance */ | ||
189 | writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE); | ||
190 | |||
191 | /* set 50Mhz PHY clock */ | ||
192 | writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0); | ||
193 | writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1); | ||
194 | |||
195 | /* set TX Differential output for lane 0 */ | ||
196 | writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL); | ||
197 | |||
198 | /* set TX Pre-emphasis Level Control for lane 0 to minimum */ | ||
199 | writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL); | ||
200 | |||
201 | /* set RX clock and data recovery bandwidth */ | ||
202 | writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS); | ||
203 | writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR); | ||
204 | writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR); | ||
205 | writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR); | ||
206 | writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR); | ||
207 | |||
208 | /* change TX Pre-emphasis Level Control for lanes */ | ||
209 | writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL); | ||
210 | writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL); | ||
211 | writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL); | ||
212 | writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL); | ||
213 | |||
214 | /* set LVCC */ | ||
215 | writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC); | ||
216 | writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC); | ||
217 | writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC); | ||
218 | writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC); | ||
219 | } | ||
220 | |||
221 | static void exynos_pcie_assert_reset(struct pcie_port *pp) | ||
222 | { | ||
223 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
224 | |||
225 | if (exynos_pcie->reset_gpio >= 0) | ||
226 | devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio, | ||
227 | GPIOF_OUT_INIT_HIGH, "RESET"); | ||
228 | return; | ||
229 | } | ||
230 | |||
231 | static int exynos_pcie_establish_link(struct pcie_port *pp) | ||
232 | { | ||
233 | u32 val; | ||
234 | int count = 0; | ||
235 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
236 | void __iomem *elbi_base = exynos_pcie->elbi_base; | ||
237 | void __iomem *block_base = exynos_pcie->block_base; | ||
238 | void __iomem *phy_base = exynos_pcie->phy_base; | ||
239 | |||
240 | if (dw_pcie_link_up(pp)) { | ||
241 | dev_err(pp->dev, "Link already up\n"); | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | /* assert reset signals */ | ||
246 | exynos_pcie_assert_core_reset(pp); | ||
247 | exynos_pcie_assert_phy_reset(pp); | ||
248 | |||
249 | /* de-assert phy reset */ | ||
250 | exynos_pcie_deassert_phy_reset(pp); | ||
251 | |||
252 | /* initialize phy */ | ||
253 | exynos_pcie_init_phy(pp); | ||
254 | |||
255 | /* pulse for common reset */ | ||
256 | writel(1, block_base + PCIE_PHY_COMMON_RESET); | ||
257 | udelay(500); | ||
258 | writel(0, block_base + PCIE_PHY_COMMON_RESET); | ||
259 | |||
260 | /* de-assert core reset */ | ||
261 | exynos_pcie_deassert_core_reset(pp); | ||
262 | |||
263 | /* setup root complex */ | ||
264 | dw_pcie_setup_rc(pp); | ||
265 | |||
266 | /* assert reset signal */ | ||
267 | exynos_pcie_assert_reset(pp); | ||
268 | |||
269 | /* assert LTSSM enable */ | ||
270 | writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE); | ||
271 | |||
272 | /* check if the link is up or not */ | ||
273 | while (!dw_pcie_link_up(pp)) { | ||
274 | mdelay(100); | ||
275 | count++; | ||
276 | if (count == 10) { | ||
277 | while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) { | ||
278 | val = readl(block_base + PCIE_PHY_PLL_LOCKED); | ||
279 | dev_info(pp->dev, "PLL Locked: 0x%x\n", val); | ||
280 | } | ||
281 | dev_err(pp->dev, "PCIe Link Fail\n"); | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | dev_info(pp->dev, "Link up\n"); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) | ||
292 | { | ||
293 | u32 val; | ||
294 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
295 | void __iomem *elbi_base = exynos_pcie->elbi_base; | ||
296 | |||
297 | val = readl(elbi_base + PCIE_IRQ_PULSE); | ||
298 | writel(val, elbi_base + PCIE_IRQ_PULSE); | ||
299 | return; | ||
300 | } | ||
301 | |||
302 | static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) | ||
303 | { | ||
304 | u32 val; | ||
305 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
306 | void __iomem *elbi_base = exynos_pcie->elbi_base; | ||
307 | |||
308 | /* enable INTX interrupt */ | ||
309 | val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | | ||
310 | IRQ_INTC_ASSERT | IRQ_INTD_ASSERT, | ||
311 | writel(val, elbi_base + PCIE_IRQ_EN_PULSE); | ||
312 | return; | ||
313 | } | ||
314 | |||
315 | static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) | ||
316 | { | ||
317 | struct pcie_port *pp = arg; | ||
318 | |||
319 | exynos_pcie_clear_irq_pulse(pp); | ||
320 | return IRQ_HANDLED; | ||
321 | } | ||
322 | |||
323 | static void exynos_pcie_enable_interrupts(struct pcie_port *pp) | ||
324 | { | ||
325 | exynos_pcie_enable_irq_pulse(pp); | ||
326 | return; | ||
327 | } | ||
328 | |||
329 | static inline void exynos_pcie_readl_rc(struct pcie_port *pp, | ||
330 | void __iomem *dbi_base, u32 *val) | ||
331 | { | ||
332 | exynos_pcie_sideband_dbi_r_mode(pp, true); | ||
333 | *val = readl(dbi_base); | ||
334 | exynos_pcie_sideband_dbi_r_mode(pp, false); | ||
335 | return; | ||
336 | } | ||
337 | |||
338 | static inline void exynos_pcie_writel_rc(struct pcie_port *pp, | ||
339 | u32 val, void __iomem *dbi_base) | ||
340 | { | ||
341 | exynos_pcie_sideband_dbi_w_mode(pp, true); | ||
342 | writel(val, dbi_base); | ||
343 | exynos_pcie_sideband_dbi_w_mode(pp, false); | ||
344 | return; | ||
345 | } | ||
346 | |||
347 | static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
348 | u32 *val) | ||
349 | { | ||
350 | int ret; | ||
351 | |||
352 | exynos_pcie_sideband_dbi_r_mode(pp, true); | ||
353 | ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); | ||
354 | exynos_pcie_sideband_dbi_r_mode(pp, false); | ||
355 | return ret; | ||
356 | } | ||
357 | |||
358 | static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | ||
359 | u32 val) | ||
360 | { | ||
361 | int ret; | ||
362 | |||
363 | exynos_pcie_sideband_dbi_w_mode(pp, true); | ||
364 | ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); | ||
365 | exynos_pcie_sideband_dbi_w_mode(pp, false); | ||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | static int exynos_pcie_link_up(struct pcie_port *pp) | ||
370 | { | ||
371 | struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); | ||
372 | u32 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_RDLH_LINKUP); | ||
373 | |||
374 | if (val == PCIE_ELBI_LTSSM_ENABLE) | ||
375 | return 1; | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static void exynos_pcie_host_init(struct pcie_port *pp) | ||
381 | { | ||
382 | exynos_pcie_establish_link(pp); | ||
383 | exynos_pcie_enable_interrupts(pp); | ||
384 | } | ||
385 | |||
386 | static struct pcie_host_ops exynos_pcie_host_ops = { | ||
387 | .readl_rc = exynos_pcie_readl_rc, | ||
388 | .writel_rc = exynos_pcie_writel_rc, | ||
389 | .rd_own_conf = exynos_pcie_rd_own_conf, | ||
390 | .wr_own_conf = exynos_pcie_wr_own_conf, | ||
391 | .link_up = exynos_pcie_link_up, | ||
392 | .host_init = exynos_pcie_host_init, | ||
393 | }; | ||
394 | |||
395 | static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) | ||
396 | { | ||
397 | int ret; | ||
398 | |||
399 | pp->irq = platform_get_irq(pdev, 1); | ||
400 | if (!pp->irq) { | ||
401 | dev_err(&pdev->dev, "failed to get irq\n"); | ||
402 | return -ENODEV; | ||
403 | } | ||
404 | ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler, | ||
405 | IRQF_SHARED, "exynos-pcie", pp); | ||
406 | if (ret) { | ||
407 | dev_err(&pdev->dev, "failed to request irq\n"); | ||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | pp->root_bus_nr = -1; | ||
412 | pp->ops = &exynos_pcie_host_ops; | ||
413 | |||
414 | spin_lock_init(&pp->conf_lock); | ||
415 | ret = dw_pcie_host_init(pp); | ||
416 | if (ret) { | ||
417 | dev_err(&pdev->dev, "failed to initialize host\n"); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int __init exynos_pcie_probe(struct platform_device *pdev) | ||
425 | { | ||
426 | struct exynos_pcie *exynos_pcie; | ||
427 | struct pcie_port *pp; | ||
428 | struct device_node *np = pdev->dev.of_node; | ||
429 | struct resource *elbi_base; | ||
430 | struct resource *phy_base; | ||
431 | struct resource *block_base; | ||
432 | int ret; | ||
433 | |||
434 | exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie), | ||
435 | GFP_KERNEL); | ||
436 | if (!exynos_pcie) { | ||
437 | dev_err(&pdev->dev, "no memory for exynos pcie\n"); | ||
438 | return -ENOMEM; | ||
439 | } | ||
440 | |||
441 | pp = &exynos_pcie->pp; | ||
442 | |||
443 | pp->dev = &pdev->dev; | ||
444 | |||
445 | exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); | ||
446 | |||
447 | exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie"); | ||
448 | if (IS_ERR(exynos_pcie->clk)) { | ||
449 | dev_err(&pdev->dev, "Failed to get pcie rc clock\n"); | ||
450 | return PTR_ERR(exynos_pcie->clk); | ||
451 | } | ||
452 | ret = clk_prepare_enable(exynos_pcie->clk); | ||
453 | if (ret) | ||
454 | return ret; | ||
455 | |||
456 | exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); | ||
457 | if (IS_ERR(exynos_pcie->bus_clk)) { | ||
458 | dev_err(&pdev->dev, "Failed to get pcie bus clock\n"); | ||
459 | ret = PTR_ERR(exynos_pcie->bus_clk); | ||
460 | goto fail_clk; | ||
461 | } | ||
462 | ret = clk_prepare_enable(exynos_pcie->bus_clk); | ||
463 | if (ret) | ||
464 | goto fail_clk; | ||
465 | |||
466 | elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
467 | exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); | ||
468 | if (IS_ERR(exynos_pcie->elbi_base)) | ||
469 | return PTR_ERR(exynos_pcie->elbi_base); | ||
470 | |||
471 | phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
472 | exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); | ||
473 | if (IS_ERR(exynos_pcie->phy_base)) | ||
474 | return PTR_ERR(exynos_pcie->phy_base); | ||
475 | |||
476 | block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
477 | exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base); | ||
478 | if (IS_ERR(exynos_pcie->block_base)) | ||
479 | return PTR_ERR(exynos_pcie->block_base); | ||
480 | |||
481 | ret = add_pcie_port(pp, pdev); | ||
482 | if (ret < 0) | ||
483 | goto fail_bus_clk; | ||
484 | |||
485 | platform_set_drvdata(pdev, exynos_pcie); | ||
486 | return 0; | ||
487 | |||
488 | fail_bus_clk: | ||
489 | clk_disable_unprepare(exynos_pcie->bus_clk); | ||
490 | fail_clk: | ||
491 | clk_disable_unprepare(exynos_pcie->clk); | ||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | static int __exit exynos_pcie_remove(struct platform_device *pdev) | ||
496 | { | ||
497 | struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev); | ||
498 | |||
499 | clk_disable_unprepare(exynos_pcie->bus_clk); | ||
500 | clk_disable_unprepare(exynos_pcie->clk); | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static const struct of_device_id exynos_pcie_of_match[] = { | ||
506 | { .compatible = "samsung,exynos5440-pcie", }, | ||
507 | {}, | ||
508 | }; | ||
509 | MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); | ||
510 | |||
511 | static struct platform_driver exynos_pcie_driver = { | ||
512 | .remove = __exit_p(exynos_pcie_remove), | ||
513 | .driver = { | ||
514 | .name = "exynos-pcie", | ||
515 | .owner = THIS_MODULE, | ||
516 | .of_match_table = of_match_ptr(exynos_pcie_of_match), | ||
517 | }, | ||
518 | }; | ||
519 | |||
520 | /* Exynos PCIe driver does not allow module unload */ | ||
521 | |||
522 | static int __init pcie_init(void) | ||
523 | { | ||
524 | return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); | ||
525 | } | ||
526 | subsys_initcall(pcie_init); | ||
527 | |||
528 | MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); | ||
529 | MODULE_DESCRIPTION("Samsung PCIe host controller driver"); | ||
530 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 26bdbda8ff90..77b0c257f215 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -1,5 +1,5 @@
1 | /* | 1 | /* |
2 | * PCIe host controller driver for Samsung EXYNOS SoCs | 2 | * Synopsys Designware PCIe host controller driver |
3 | * | 3 | * |
4 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | 4 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. |
5 | * http://www.samsung.com | 5 | * http://www.samsung.com |
@@ -11,74 +11,28 @@
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/gpio.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
19 | #include <linux/list.h> | ||
20 | #include <linux/module.h> | 15 | #include <linux/module.h> |
21 | #include <linux/of.h> | ||
22 | #include <linux/of_address.h> | 16 | #include <linux/of_address.h> |
23 | #include <linux/of_gpio.h> | ||
24 | #include <linux/of_pci.h> | ||
25 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
26 | #include <linux/pci_regs.h> | 18 | #include <linux/pci_regs.h> |
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/resource.h> | ||
29 | #include <linux/signal.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/types.h> | 19 | #include <linux/types.h> |
32 | 20 | ||
33 | struct pcie_port_info { | 21 | #include "pcie-designware.h" |
34 | u32 cfg0_size; | ||
35 | u32 cfg1_size; | ||
36 | u32 io_size; | ||
37 | u32 mem_size; | ||
38 | phys_addr_t io_bus_addr; | ||
39 | phys_addr_t mem_bus_addr; | ||
40 | }; | ||
41 | |||
42 | struct pcie_port { | ||
43 | struct device *dev; | ||
44 | u8 controller; | ||
45 | u8 root_bus_nr; | ||
46 | void __iomem *dbi_base; | ||
47 | void __iomem *elbi_base; | ||
48 | void __iomem *phy_base; | ||
49 | void __iomem *purple_base; | ||
50 | u64 cfg0_base; | ||
51 | void __iomem *va_cfg0_base; | ||
52 | u64 cfg1_base; | ||
53 | void __iomem *va_cfg1_base; | ||
54 | u64 io_base; | ||
55 | u64 mem_base; | ||
56 | spinlock_t conf_lock; | ||
57 | struct resource cfg; | ||
58 | struct resource io; | ||
59 | struct resource mem; | ||
60 | struct pcie_port_info config; | ||
61 | struct clk *clk; | ||
62 | struct clk *bus_clk; | ||
63 | int irq; | ||
64 | int reset_gpio; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * Exynos PCIe IP consists of Synopsys specific part and Exynos | ||
69 | * specific part. Only core block is a Synopsys designware part; | ||
70 | * other parts are Exynos specific. | ||
71 | */ | ||
72 | 22 | ||
73 | /* Synopsis specific PCIE configuration registers */ | 23 | /* Synopsis specific PCIE configuration registers */ |
74 | #define PCIE_PORT_LINK_CONTROL 0x710 | 24 | #define PCIE_PORT_LINK_CONTROL 0x710 |
75 | #define PORT_LINK_MODE_MASK (0x3f << 16) | 25 | #define PORT_LINK_MODE_MASK (0x3f << 16) |
26 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) | ||
27 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) | ||
76 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) | 28 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) |
77 | 29 | ||
78 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 30 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
79 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | 31 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) |
80 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) | 32 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) |
81 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x7 << 8) | 33 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) |
34 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) | ||
35 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) | ||
82 | 36 | ||
83 | #define PCIE_MSI_ADDR_LO 0x820 | 37 | #define PCIE_MSI_ADDR_LO 0x820 |
84 | #define PCIE_MSI_ADDR_HI 0x824 | 38 | #define PCIE_MSI_ADDR_HI 0x824 |
@@ -108,69 +62,16 @@ struct pcie_port {
108 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) | 62 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) |
109 | #define PCIE_ATU_UPPER_TARGET 0x91C | 63 | #define PCIE_ATU_UPPER_TARGET 0x91C |
110 | 64 | ||
111 | /* Exynos specific PCIE configuration registers */ | 65 | static struct hw_pci dw_pci; |
112 | 66 | ||
113 | /* PCIe ELBI registers */ | 67 | unsigned long global_io_offset; |
114 | #define PCIE_IRQ_PULSE 0x000 | ||
115 | #define IRQ_INTA_ASSERT (0x1 << 0) | ||
116 | #define IRQ_INTB_ASSERT (0x1 << 2) | ||
117 | #define IRQ_INTC_ASSERT (0x1 << 4) | ||
118 | #define IRQ_INTD_ASSERT (0x1 << 6) | ||
119 | #define PCIE_IRQ_LEVEL 0x004 | ||
120 | #define PCIE_IRQ_SPECIAL 0x008 | ||
121 | #define PCIE_IRQ_EN_PULSE 0x00c | ||
122 | #define PCIE_IRQ_EN_LEVEL 0x010 | ||
123 | #define PCIE_IRQ_EN_SPECIAL 0x014 | ||
124 | #define PCIE_PWR_RESET 0x018 | ||
125 | #define PCIE_CORE_RESET 0x01c | ||
126 | #define PCIE_CORE_RESET_ENABLE (0x1 << 0) | ||
127 | #define PCIE_STICKY_RESET 0x020 | ||
128 | #define PCIE_NONSTICKY_RESET 0x024 | ||
129 | #define PCIE_APP_INIT_RESET 0x028 | ||
130 | #define PCIE_APP_LTSSM_ENABLE 0x02c | ||
131 | #define PCIE_ELBI_RDLH_LINKUP 0x064 | ||
132 | #define PCIE_ELBI_LTSSM_ENABLE 0x1 | ||
133 | #define PCIE_ELBI_SLV_AWMISC 0x11c | ||
134 | #define PCIE_ELBI_SLV_ARMISC 0x120 | ||
135 | #define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) | ||
136 | |||
137 | /* PCIe Purple registers */ | ||
138 | #define PCIE_PHY_GLOBAL_RESET 0x000 | ||
139 | #define PCIE_PHY_COMMON_RESET 0x004 | ||
140 | #define PCIE_PHY_CMN_REG 0x008 | ||
141 | #define PCIE_PHY_MAC_RESET 0x00c | ||
142 | #define PCIE_PHY_PLL_LOCKED 0x010 | ||
143 | #define PCIE_PHY_TRSVREG_RESET 0x020 | ||
144 | #define PCIE_PHY_TRSV_RESET 0x024 | ||
145 | |||
146 | /* PCIe PHY registers */ | ||
147 | #define PCIE_PHY_IMPEDANCE 0x004 | ||
148 | #define PCIE_PHY_PLL_DIV_0 0x008 | ||
149 | #define PCIE_PHY_PLL_BIAS 0x00c | ||
150 | #define PCIE_PHY_DCC_FEEDBACK 0x014 | ||
151 | #define PCIE_PHY_PLL_DIV_1 0x05c | ||
152 | #define PCIE_PHY_TRSV0_EMP_LVL 0x084 | ||
153 | #define PCIE_PHY_TRSV0_DRV_LVL 0x088 | ||
154 | #define PCIE_PHY_TRSV0_RXCDR 0x0ac | ||
155 | #define PCIE_PHY_TRSV0_LVCC 0x0dc | ||
156 | #define PCIE_PHY_TRSV1_EMP_LVL 0x144 | ||
157 | #define PCIE_PHY_TRSV1_RXCDR 0x16c | ||
158 | #define PCIE_PHY_TRSV1_LVCC 0x19c | ||
159 | #define PCIE_PHY_TRSV2_EMP_LVL 0x204 | ||
160 | #define PCIE_PHY_TRSV2_RXCDR 0x22c | ||
161 | #define PCIE_PHY_TRSV2_LVCC 0x25c | ||
162 | #define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 | ||
163 | #define PCIE_PHY_TRSV3_RXCDR 0x2ec | ||
164 | #define PCIE_PHY_TRSV3_LVCC 0x31c | ||
165 | |||
166 | static struct hw_pci exynos_pci; | ||
167 | 68 | ||
168 | static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) | 69 | static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) |
169 | { | 70 | { |
170 | return sys->private_data; | 71 | return sys->private_data; |
171 | } | 72 | } |
172 | 73 | ||
173 | static inline int cfg_read(void *addr, int where, int size, u32 *val) | 74 | int cfg_read(void __iomem *addr, int where, int size, u32 *val) |
174 | { | 75 | { |
175 | *val = readl(addr); | 76 | *val = readl(addr); |
176 | 77 | ||
@@ -184,7 +85,7 @@ static inline int cfg_read(void *addr, int where, int size, u32 *val)
184 | return PCIBIOS_SUCCESSFUL; | 85 | return PCIBIOS_SUCCESSFUL; |
185 | } | 86 | } |
186 | 87 | ||
187 | static inline int cfg_write(void *addr, int where, int size, u32 val) | 88 | int cfg_write(void __iomem *addr, int where, int size, u32 val) |
188 | { | 89 | { |
189 | if (size == 4) | 90 | if (size == 4) |
190 | writel(val, addr); | 91 | writel(val, addr); |
@@ -198,155 +99,241 @@ static inline int cfg_write(void *addr, int where, int size, u32 val)
198 | return PCIBIOS_SUCCESSFUL; | 99 | return PCIBIOS_SUCCESSFUL; |
199 | } | 100 | } |
200 | 101 | ||
201 | static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) | 102 | static inline void dw_pcie_readl_rc(struct pcie_port *pp, |
103 | void __iomem *dbi_addr, u32 *val) | ||
202 | { | 104 | { |
203 | u32 val; | 105 | if (pp->ops->readl_rc) |
204 | 106 | pp->ops->readl_rc(pp, dbi_addr, val); | |
205 | if (on) { | 107 | else |
206 | val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); | 108 | *val = readl(dbi_addr); |
207 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
208 | writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
209 | } else { | ||
210 | val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
211 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
212 | writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on) | ||
217 | { | ||
218 | u32 val; | ||
219 | |||
220 | if (on) { | ||
221 | val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
222 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
223 | writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
224 | } else { | ||
225 | val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
226 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
227 | writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC); | ||
228 | } | ||
229 | } | ||
230 | |||
231 | static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val) | ||
232 | { | ||
233 | exynos_pcie_sideband_dbi_r_mode(pp, true); | ||
234 | *val = readl(dbi_base); | ||
235 | exynos_pcie_sideband_dbi_r_mode(pp, false); | ||
236 | return; | ||
237 | } | 109 | } |
238 | 110 | ||
239 | static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base) | 111 | static inline void dw_pcie_writel_rc(struct pcie_port *pp, |
112 | u32 val, void __iomem *dbi_addr) | ||
240 | { | 113 | { |
241 | exynos_pcie_sideband_dbi_w_mode(pp, true); | 114 | if (pp->ops->writel_rc) |
242 | writel(val, dbi_base); | 115 | pp->ops->writel_rc(pp, val, dbi_addr); |
243 | exynos_pcie_sideband_dbi_w_mode(pp, false); | 116 | else |
244 | return; | 117 | writel(val, dbi_addr); |
245 | } | 118 | } |
246 | 119 | ||
247 | static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | 120 | int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, |
248 | u32 *val) | 121 | u32 *val) |
249 | { | 122 | { |
250 | int ret; | 123 | int ret; |
251 | 124 | ||
252 | exynos_pcie_sideband_dbi_r_mode(pp, true); | 125 | if (pp->ops->rd_own_conf) |
253 | ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); | 126 | ret = pp->ops->rd_own_conf(pp, where, size, val); |
254 | exynos_pcie_sideband_dbi_r_mode(pp, false); | 127 | else |
128 | ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); | ||
129 | |||
255 | return ret; | 130 | return ret; |
256 | } | 131 | } |
257 | 132 | ||
258 | static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | 133 | int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, |
259 | u32 val) | 134 | u32 val) |
260 | { | 135 | { |
261 | int ret; | 136 | int ret; |
262 | 137 | ||
263 | exynos_pcie_sideband_dbi_w_mode(pp, true); | 138 | if (pp->ops->wr_own_conf) |
264 | ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); | 139 | ret = pp->ops->wr_own_conf(pp, where, size, val); |
265 | exynos_pcie_sideband_dbi_w_mode(pp, false); | 140 | else |
141 | ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, | ||
142 | val); | ||
143 | |||
266 | return ret; | 144 | return ret; |
267 | } | 145 | } |
268 | 146 | ||
269 | static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) | 147 | int dw_pcie_link_up(struct pcie_port *pp) |
148 | { | ||
149 | if (pp->ops->link_up) | ||
150 | return pp->ops->link_up(pp); | ||
151 | else | ||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | int __init dw_pcie_host_init(struct pcie_port *pp) | ||
156 | { | ||
157 | struct device_node *np = pp->dev->of_node; | ||
158 | struct of_pci_range range; | ||
159 | struct of_pci_range_parser parser; | ||
160 | u32 val; | ||
161 | |||
162 | if (of_pci_range_parser_init(&parser, np)) { | ||
163 | dev_err(pp->dev, "missing ranges property\n"); | ||
164 | return -EINVAL; | ||
165 | } | ||
166 | |||
167 | /* Get the I/O and memory ranges from DT */ | ||
168 | for_each_of_pci_range(&parser, &range) { | ||
169 | unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; | ||
170 | if (restype == IORESOURCE_IO) { | ||
171 | of_pci_range_to_resource(&range, np, &pp->io); | ||
172 | pp->io.name = "I/O"; | ||
173 | pp->io.start = max_t(resource_size_t, | ||
174 | PCIBIOS_MIN_IO, | ||
175 | range.pci_addr + global_io_offset); | ||
176 | pp->io.end = min_t(resource_size_t, | ||
177 | IO_SPACE_LIMIT, | ||
178 | range.pci_addr + range.size | ||
179 | + global_io_offset); | ||
180 | pp->config.io_size = resource_size(&pp->io); | ||
181 | pp->config.io_bus_addr = range.pci_addr; | ||
182 | } | ||
183 | if (restype == IORESOURCE_MEM) { | ||
184 | of_pci_range_to_resource(&range, np, &pp->mem); | ||
185 | pp->mem.name = "MEM"; | ||
186 | pp->config.mem_size = resource_size(&pp->mem); | ||
187 | pp->config.mem_bus_addr = range.pci_addr; | ||
188 | } | ||
189 | if (restype == 0) { | ||
190 | of_pci_range_to_resource(&range, np, &pp->cfg); | ||
191 | pp->config.cfg0_size = resource_size(&pp->cfg)/2; | ||
192 | pp->config.cfg1_size = resource_size(&pp->cfg)/2; | ||
193 | } | ||
194 | } | ||
195 | |||
196 | if (!pp->dbi_base) { | ||
197 | pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, | ||
198 | resource_size(&pp->cfg)); | ||
199 | if (!pp->dbi_base) { | ||
200 | dev_err(pp->dev, "error with ioremap\n"); | ||
201 | return -ENOMEM; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | pp->cfg0_base = pp->cfg.start; | ||
206 | pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; | ||
207 | pp->io_base = pp->io.start; | ||
208 | pp->mem_base = pp->mem.start; | ||
209 | |||
210 | pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, | ||
211 | pp->config.cfg0_size); | ||
212 | if (!pp->va_cfg0_base) { | ||
213 | dev_err(pp->dev, "error with ioremap in function\n"); | ||
214 | return -ENOMEM; | ||
215 | } | ||
216 | pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base, | ||
217 | pp->config.cfg1_size); | ||
218 | if (!pp->va_cfg1_base) { | ||
219 | dev_err(pp->dev, "error with ioremap\n"); | ||
220 | return -ENOMEM; | ||
221 | } | ||
222 | |||
223 | if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { | ||
224 | dev_err(pp->dev, "Failed to parse the number of lanes\n"); | ||
225 | return -EINVAL; | ||
226 | } | ||
227 | |||
228 | if (pp->ops->host_init) | ||
229 | pp->ops->host_init(pp); | ||
230 | |||
231 | dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); | ||
232 | |||
233 | /* program correct class for RC */ | ||
234 | dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); | ||
235 | |||
236 | dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); | ||
237 | val |= PORT_LOGIC_SPEED_CHANGE; | ||
238 | dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); | ||
239 | |||
240 | dw_pci.nr_controllers = 1; | ||
241 | dw_pci.private_data = (void **)&pp; | ||
242 | |||
243 | pci_common_init(&dw_pci); | ||
244 | pci_assign_unassigned_resources(); | ||
245 | #ifdef CONFIG_PCI_DOMAINS | ||
246 | dw_pci.domain++; | ||
247 | #endif | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) | ||
270 | { | 253 | { |
271 | u32 val; | 254 | u32 val; |
272 | void __iomem *dbi_base = pp->dbi_base; | 255 | void __iomem *dbi_base = pp->dbi_base; |
273 | 256 | ||
274 | /* Program viewport 0 : OUTBOUND : CFG0 */ | 257 | /* Program viewport 0 : OUTBOUND : CFG0 */ |
275 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; | 258 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; |
276 | writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); | 259 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); |
277 | writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE); | 260 | dw_pcie_writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE); |
278 | writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); | 261 | dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), |
279 | writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, | 262 | dbi_base + PCIE_ATU_UPPER_BASE); |
263 | dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, | ||
280 | dbi_base + PCIE_ATU_LIMIT); | 264 | dbi_base + PCIE_ATU_LIMIT); |
281 | writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); | 265 | dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); |
282 | writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); | 266 | dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); |
283 | writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1); | 267 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1); |
284 | val = PCIE_ATU_ENABLE; | 268 | val = PCIE_ATU_ENABLE; |
285 | writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); | 269 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); |
286 | } | 270 | } |
287 | 271 | ||
288 | static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) | 272 | static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) |
289 | { | 273 | { |
290 | u32 val; | 274 | u32 val; |
291 | void __iomem *dbi_base = pp->dbi_base; | 275 | void __iomem *dbi_base = pp->dbi_base; |
292 | 276 | ||
293 | /* Program viewport 1 : OUTBOUND : CFG1 */ | 277 | /* Program viewport 1 : OUTBOUND : CFG1 */ |
294 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; | 278 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; |
295 | writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); | 279 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); |
296 | writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1); | 280 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1); |
297 | val = PCIE_ATU_ENABLE; | 281 | val = PCIE_ATU_ENABLE; |
298 | writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); | 282 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); |
299 | writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE); | 283 | dw_pcie_writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE); |
300 | writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); | 284 | dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), |
301 | writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, | 285 | dbi_base + PCIE_ATU_UPPER_BASE); |
286 | dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, | ||
302 | dbi_base + PCIE_ATU_LIMIT); | 287 | dbi_base + PCIE_ATU_LIMIT); |
303 | writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); | 288 | dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); |
304 | writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); | 289 | dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); |
305 | } | 290 | } |
306 | 291 | ||
307 | static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) | 292 | static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) |
308 | { | 293 | { |
309 | u32 val; | 294 | u32 val; |
310 | void __iomem *dbi_base = pp->dbi_base; | 295 | void __iomem *dbi_base = pp->dbi_base; |
311 | 296 | ||
312 | /* Program viewport 0 : OUTBOUND : MEM */ | 297 | /* Program viewport 0 : OUTBOUND : MEM */ |
313 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; | 298 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; |
314 | writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); | 299 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); |
315 | writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1); | 300 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1); |
316 | val = PCIE_ATU_ENABLE; | 301 | val = PCIE_ATU_ENABLE; |
317 | writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); | 302 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); |
318 | writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE); | 303 | dw_pcie_writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE); |
319 | writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); | 304 | dw_pcie_writel_rc(pp, (pp->mem_base >> 32), |
320 | writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, | 305 | dbi_base + PCIE_ATU_UPPER_BASE); |
306 | dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, | ||
321 | dbi_base + PCIE_ATU_LIMIT); | 307 | dbi_base + PCIE_ATU_LIMIT); |
322 | writel_rc(pp, pp->config.mem_bus_addr, | 308 | dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, |
323 | dbi_base + PCIE_ATU_LOWER_TARGET); | 309 | dbi_base + PCIE_ATU_LOWER_TARGET); |
324 | writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), | 310 | dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), |
325 | dbi_base + PCIE_ATU_UPPER_TARGET); | 311 | dbi_base + PCIE_ATU_UPPER_TARGET); |
326 | } | 312 | } |
327 | 313 | ||
328 | static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp) | 314 | static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) |
329 | { | 315 | { |
330 | u32 val; | 316 | u32 val; |
331 | void __iomem *dbi_base = pp->dbi_base; | 317 | void __iomem *dbi_base = pp->dbi_base; |
332 | 318 | ||
333 | /* Program viewport 1 : OUTBOUND : IO */ | 319 | /* Program viewport 1 : OUTBOUND : IO */ |
334 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; | 320 | val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; |
335 | writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); | 321 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); |
336 | writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1); | 322 | dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1); |
337 | val = PCIE_ATU_ENABLE; | 323 | val = PCIE_ATU_ENABLE; |
338 | writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); | 324 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); |
339 | writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE); | 325 | dw_pcie_writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE); |
340 | writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); | 326 | dw_pcie_writel_rc(pp, (pp->io_base >> 32), |
341 | writel_rc(pp, pp->io_base + pp->config.io_size - 1, | 327 | dbi_base + PCIE_ATU_UPPER_BASE); |
328 | dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1, | ||
342 | dbi_base + PCIE_ATU_LIMIT); | 329 | dbi_base + PCIE_ATU_LIMIT); |
343 | writel_rc(pp, pp->config.io_bus_addr, | 330 | dw_pcie_writel_rc(pp, pp->config.io_bus_addr, |
344 | dbi_base + PCIE_ATU_LOWER_TARGET); | 331 | dbi_base + PCIE_ATU_LOWER_TARGET); |
345 | writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), | 332 | dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), |
346 | dbi_base + PCIE_ATU_UPPER_TARGET); | 333 | dbi_base + PCIE_ATU_UPPER_TARGET); |
347 | } | 334 | } |
348 | 335 | ||
349 | static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | 336 | static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, |
350 | u32 devfn, int where, int size, u32 *val) | 337 | u32 devfn, int where, int size, u32 *val) |
351 | { | 338 | { |
352 | int ret = PCIBIOS_SUCCESSFUL; | 339 | int ret = PCIBIOS_SUCCESSFUL; |
@@ -357,19 +344,19 @@ static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
357 | address = where & ~0x3; | 344 | address = where & ~0x3; |
358 | 345 | ||
359 | if (bus->parent->number == pp->root_bus_nr) { | 346 | if (bus->parent->number == pp->root_bus_nr) { |
360 | exynos_pcie_prog_viewport_cfg0(pp, busdev); | 347 | dw_pcie_prog_viewport_cfg0(pp, busdev); |
361 | ret = cfg_read(pp->va_cfg0_base + address, where, size, val); | 348 | ret = cfg_read(pp->va_cfg0_base + address, where, size, val); |
362 | exynos_pcie_prog_viewport_mem_outbound(pp); | 349 | dw_pcie_prog_viewport_mem_outbound(pp); |
363 | } else { | 350 | } else { |
364 | exynos_pcie_prog_viewport_cfg1(pp, busdev); | 351 | dw_pcie_prog_viewport_cfg1(pp, busdev); |
365 | ret = cfg_read(pp->va_cfg1_base + address, where, size, val); | 352 | ret = cfg_read(pp->va_cfg1_base + address, where, size, val); |
366 | exynos_pcie_prog_viewport_io_outbound(pp); | 353 | dw_pcie_prog_viewport_io_outbound(pp); |
367 | } | 354 | } |
368 | 355 | ||
369 | return ret; | 356 | return ret; |
370 | } | 357 | } |
371 | 358 | ||
372 | static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | 359 | static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, |
373 | u32 devfn, int where, int size, u32 val) | 360 | u32 devfn, int where, int size, u32 val) |
374 | { | 361 | { |
375 | int ret = PCIBIOS_SUCCESSFUL; | 362 | int ret = PCIBIOS_SUCCESSFUL; |
@@ -380,59 +367,25 @@ static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
380 | address = where & ~0x3; | 367 | address = where & ~0x3; |
381 | 368 | ||
382 | if (bus->parent->number == pp->root_bus_nr) { | 369 | if (bus->parent->number == pp->root_bus_nr) { |
383 | exynos_pcie_prog_viewport_cfg0(pp, busdev); | 370 | dw_pcie_prog_viewport_cfg0(pp, busdev); |
384 | ret = cfg_write(pp->va_cfg0_base + address, where, size, val); | 371 | ret = cfg_write(pp->va_cfg0_base + address, where, size, val); |
385 | exynos_pcie_prog_viewport_mem_outbound(pp); | 372 | dw_pcie_prog_viewport_mem_outbound(pp); |
386 | } else { | 373 | } else { |
387 | exynos_pcie_prog_viewport_cfg1(pp, busdev); | 374 | dw_pcie_prog_viewport_cfg1(pp, busdev); |
388 | ret = cfg_write(pp->va_cfg1_base + address, where, size, val); | 375 | ret = cfg_write(pp->va_cfg1_base + address, where, size, val); |
389 | exynos_pcie_prog_viewport_io_outbound(pp); | 376 | dw_pcie_prog_viewport_io_outbound(pp); |
390 | } | 377 | } |
391 | 378 | ||
392 | return ret; | 379 | return ret; |
393 | } | 380 | } |
394 | 381 | ||
395 | static unsigned long global_io_offset; | ||
396 | 382 | ||
397 | static int exynos_pcie_setup(int nr, struct pci_sys_data *sys) | 383 | static int dw_pcie_valid_config(struct pcie_port *pp, |
398 | { | ||
399 | struct pcie_port *pp; | ||
400 | |||
401 | pp = sys_to_pcie(sys); | ||
402 | |||
403 | if (!pp) | ||
404 | return 0; | ||
405 | |||
406 | if (global_io_offset < SZ_1M && pp->config.io_size > 0) { | ||
407 | sys->io_offset = global_io_offset - pp->config.io_bus_addr; | ||
408 | pci_ioremap_io(sys->io_offset, pp->io.start); | ||
409 | global_io_offset += SZ_64K; | ||
410 | pci_add_resource_offset(&sys->resources, &pp->io, | ||
411 | sys->io_offset); | ||
412 | } | ||
413 | |||
414 | sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; | ||
415 | pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); | ||
416 | |||
417 | return 1; | ||
418 | } | ||
419 | |||
420 | static int exynos_pcie_link_up(struct pcie_port *pp) | ||
421 | { | ||
422 | u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP); | ||
423 | |||
424 | if (val == PCIE_ELBI_LTSSM_ENABLE) | ||
425 | return 1; | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int exynos_pcie_valid_config(struct pcie_port *pp, | ||
431 | struct pci_bus *bus, int dev) | 384 | struct pci_bus *bus, int dev) |
432 | { | 385 | { |
433 | /* If there is no link, then there is no device */ | 386 | /* If there is no link, then there is no device */ |
434 | if (bus->number != pp->root_bus_nr) { | 387 | if (bus->number != pp->root_bus_nr) { |
435 | if (!exynos_pcie_link_up(pp)) | 388 | if (!dw_pcie_link_up(pp)) |
436 | return 0; | 389 | return 0; |
437 | } | 390 | } |
438 | 391 | ||
@@ -450,7 +403,7 @@ static int exynos_pcie_valid_config(struct pcie_port *pp,
450 | return 1; | 403 | return 1; |
451 | } | 404 | } |
452 | 405 | ||
453 | static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | 406 | static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, |
454 | int size, u32 *val) | 407 | int size, u32 *val) |
455 | { | 408 | { |
456 | struct pcie_port *pp = sys_to_pcie(bus->sysdata); | 409 | struct pcie_port *pp = sys_to_pcie(bus->sysdata); |
@@ -462,23 +415,23 @@ static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
462 | return -EINVAL; | 415 | return -EINVAL; |
463 | } | 416 | } |
464 | 417 | ||
465 | if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { | 418 | if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { |
466 | *val = 0xffffffff; | 419 | *val = 0xffffffff; |
467 | return PCIBIOS_DEVICE_NOT_FOUND; | 420 | return PCIBIOS_DEVICE_NOT_FOUND; |
468 | } | 421 | } |
469 | 422 | ||
470 | spin_lock_irqsave(&pp->conf_lock, flags); | 423 | spin_lock_irqsave(&pp->conf_lock, flags); |
471 | if (bus->number != pp->root_bus_nr) | 424 | if (bus->number != pp->root_bus_nr) |
472 | ret = exynos_pcie_rd_other_conf(pp, bus, devfn, | 425 | ret = dw_pcie_rd_other_conf(pp, bus, devfn, |
473 | where, size, val); | 426 | where, size, val); |
474 | else | 427 | else |
475 | ret = exynos_pcie_rd_own_conf(pp, where, size, val); | 428 | ret = dw_pcie_rd_own_conf(pp, where, size, val); |
476 | spin_unlock_irqrestore(&pp->conf_lock, flags); | 429 | spin_unlock_irqrestore(&pp->conf_lock, flags); |
477 | 430 | ||
478 | return ret; | 431 | return ret; |
479 | } | 432 | } |
480 | 433 | ||
481 | static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | 434 | static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, |
482 | int where, int size, u32 val) | 435 | int where, int size, u32 val) |
483 | { | 436 | { |
484 | struct pcie_port *pp = sys_to_pcie(bus->sysdata); | 437 | struct pcie_port *pp = sys_to_pcie(bus->sysdata); |
@@ -490,34 +443,56 @@ static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
490 | return -EINVAL; | 443 | return -EINVAL; |
491 | } | 444 | } |
492 | 445 | ||
493 | if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) | 446 | if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) |
494 | return PCIBIOS_DEVICE_NOT_FOUND; | 447 | return PCIBIOS_DEVICE_NOT_FOUND; |
495 | 448 | ||
496 | spin_lock_irqsave(&pp->conf_lock, flags); | 449 | spin_lock_irqsave(&pp->conf_lock, flags); |
497 | if (bus->number != pp->root_bus_nr) | 450 | if (bus->number != pp->root_bus_nr) |
498 | ret = exynos_pcie_wr_other_conf(pp, bus, devfn, | 451 | ret = dw_pcie_wr_other_conf(pp, bus, devfn, |
499 | where, size, val); | 452 | where, size, val); |
500 | else | 453 | else |
501 | ret = exynos_pcie_wr_own_conf(pp, where, size, val); | 454 | ret = dw_pcie_wr_own_conf(pp, where, size, val); |
502 | spin_unlock_irqrestore(&pp->conf_lock, flags); | 455 | spin_unlock_irqrestore(&pp->conf_lock, flags); |
503 | 456 | ||
504 | return ret; | 457 | return ret; |
505 | } | 458 | } |
506 | 459 | ||
507 | static struct pci_ops exynos_pcie_ops = { | 460 | static struct pci_ops dw_pcie_ops = { |
508 | .read = exynos_pcie_rd_conf, | 461 | .read = dw_pcie_rd_conf, |
509 | .write = exynos_pcie_wr_conf, | 462 | .write = dw_pcie_wr_conf, |
510 | }; | 463 | }; |
511 | 464 | ||
512 | static struct pci_bus *exynos_pcie_scan_bus(int nr, | 465 | int dw_pcie_setup(int nr, struct pci_sys_data *sys) |
513 | struct pci_sys_data *sys) | 466 | { |
467 | struct pcie_port *pp; | ||
468 | |||
469 | pp = sys_to_pcie(sys); | ||
470 | |||
471 | if (!pp) | ||
472 | return 0; | ||
473 | |||
474 | if (global_io_offset < SZ_1M && pp->config.io_size > 0) { | ||
475 | sys->io_offset = global_io_offset - pp->config.io_bus_addr; | ||
476 | pci_ioremap_io(sys->io_offset, pp->io.start); | ||
477 | global_io_offset += SZ_64K; | ||
478 | pci_add_resource_offset(&sys->resources, &pp->io, | ||
479 | sys->io_offset); | ||
480 | } | ||
481 | |||
482 | sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; | ||
483 | pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); | ||
484 | |||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) | ||
514 | { | 489 | { |
515 | struct pci_bus *bus; | 490 | struct pci_bus *bus; |
516 | struct pcie_port *pp = sys_to_pcie(sys); | 491 | struct pcie_port *pp = sys_to_pcie(sys); |
517 | 492 | ||
518 | if (pp) { | 493 | if (pp) { |
519 | pp->root_bus_nr = sys->busnr; | 494 | pp->root_bus_nr = sys->busnr; |
520 | bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops, | 495 | bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops, |
521 | sys, &sys->resources); | 496 | sys, &sys->resources); |
522 | } else { | 497 | } else { |
523 | bus = NULL; | 498 | bus = NULL; |
@@ -527,20 +502,20 @@ static struct pci_bus *exynos_pcie_scan_bus(int nr,
527 | return bus; | 502 | return bus; |
528 | } | 503 | } |
529 | 504 | ||
530 | static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 505 | int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
531 | { | 506 | { |
532 | struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); | 507 | struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); |
533 | 508 | ||
534 | return pp->irq; | 509 | return pp->irq; |
535 | } | 510 | } |
536 | 511 | ||
537 | static struct hw_pci exynos_pci = { | 512 | static struct hw_pci dw_pci = { |
538 | .setup = exynos_pcie_setup, | 513 | .setup = dw_pcie_setup, |
539 | .scan = exynos_pcie_scan_bus, | 514 | .scan = dw_pcie_scan_bus, |
540 | .map_irq = exynos_pcie_map_irq, | 515 | .map_irq = dw_pcie_map_irq, |
541 | }; | 516 | }; |
542 | 517 | ||
543 | static void exynos_pcie_setup_rc(struct pcie_port *pp) | 518 | void dw_pcie_setup_rc(struct pcie_port *pp) |
544 | { | 519 | { |
545 | struct pcie_port_info *config = &pp->config; | 520 | struct pcie_port_info *config = &pp->config; |
546 | void __iomem *dbi_base = pp->dbi_base; | 521 | void __iomem *dbi_base = pp->dbi_base; |
@@ -549,509 +524,67 @@ static void exynos_pcie_setup_rc(struct pcie_port *pp)
549 | u32 memlimit; | 524 | u32 memlimit; |
550 | 525 | ||
551 | /* set the number of lines as 4 */ | 526 | /* set the number of lines as 4 */ |
552 | readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val); | 527 | dw_pcie_readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val); |
553 | val &= ~PORT_LINK_MODE_MASK; | 528 | val &= ~PORT_LINK_MODE_MASK; |
554 | val |= PORT_LINK_MODE_4_LANES; | 529 | switch (pp->lanes) { |
555 | writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL); | 530 | case 1: |
531 | val |= PORT_LINK_MODE_1_LANES; | ||
532 | break; | ||
533 | case 2: | ||
534 | val |= PORT_LINK_MODE_2_LANES; | ||
535 | break; | ||
536 | case 4: | ||
537 | val |= PORT_LINK_MODE_4_LANES; | ||
538 | break; | ||
539 | } | ||
540 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL); | ||
556 | 541 | ||
557 | /* set link width speed control register */ | 542 | /* set link width speed control register */ |
558 | readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val); | 543 | dw_pcie_readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val); |
559 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; | 544 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; |
560 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; | 545 | switch (pp->lanes) { |
561 | writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | 546 | case 1: |
547 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; | ||
548 | break; | ||
549 | case 2: | ||
550 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; | ||
551 | break; | ||
552 | case 4: | ||
553 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; | ||
554 | break; | ||
555 | } | ||
556 | dw_pcie_writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
562 | 557 | ||
563 | /* setup RC BARs */ | 558 | /* setup RC BARs */ |
564 | writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0); | 559 | dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0); |
565 | writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1); | 560 | dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1); |
566 | 561 | ||
567 | /* setup interrupt pins */ | 562 | /* setup interrupt pins */ |
568 | readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val); | 563 | dw_pcie_readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val); |
569 | val &= 0xffff00ff; | 564 | val &= 0xffff00ff; |
570 | val |= 0x00000100; | 565 | val |= 0x00000100; |
571 | writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE); | 566 | dw_pcie_writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE); |
572 | 567 | ||
573 | /* setup bus numbers */ | 568 | /* setup bus numbers */ |
574 | readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val); | 569 | dw_pcie_readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val); |
575 | val &= 0xff000000; | 570 | val &= 0xff000000; |
576 | val |= 0x00010100; | 571 | val |= 0x00010100; |
577 | writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS); | 572 | dw_pcie_writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS); |
578 | 573 | ||
579 | /* setup memory base, memory limit */ | 574 | /* setup memory base, memory limit */ |
580 | membase = ((u32)pp->mem_base & 0xfff00000) >> 16; | 575 | membase = ((u32)pp->mem_base & 0xfff00000) >> 16; |
581 | memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; | 576 | memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; |
582 | val = memlimit | membase; | 577 | val = memlimit | membase; |
583 | writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE); | 578 | dw_pcie_writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE); |
584 | 579 | ||
585 | /* setup command register */ | 580 | /* setup command register */ |
586 | readl_rc(pp, dbi_base + PCI_COMMAND, &val); | 581 | dw_pcie_readl_rc(pp, dbi_base + PCI_COMMAND, &val); |
587 | val &= 0xffff0000; | 582 | val &= 0xffff0000; |
588 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | 583 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | |
589 | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; | 584 | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; |
590 | writel_rc(pp, val, dbi_base + PCI_COMMAND); | 585 | dw_pcie_writel_rc(pp, val, dbi_base + PCI_COMMAND); |
591 | } | ||
592 | |||
593 | static void exynos_pcie_assert_core_reset(struct pcie_port *pp) | ||
594 | { | ||
595 | u32 val; | ||
596 | void __iomem *elbi_base = pp->elbi_base; | ||
597 | |||
598 | val = readl(elbi_base + PCIE_CORE_RESET); | ||
599 | val &= ~PCIE_CORE_RESET_ENABLE; | ||
600 | writel(val, elbi_base + PCIE_CORE_RESET); | ||
601 | writel(0, elbi_base + PCIE_PWR_RESET); | ||
602 | writel(0, elbi_base + PCIE_STICKY_RESET); | ||
603 | writel(0, elbi_base + PCIE_NONSTICKY_RESET); | ||
604 | } | ||
605 | |||
606 | static void exynos_pcie_deassert_core_reset(struct pcie_port *pp) | ||
607 | { | ||
608 | u32 val; | ||
609 | void __iomem *elbi_base = pp->elbi_base; | ||
610 | void __iomem *purple_base = pp->purple_base; | ||
611 | |||
612 | val = readl(elbi_base + PCIE_CORE_RESET); | ||
613 | val |= PCIE_CORE_RESET_ENABLE; | ||
614 | writel(val, elbi_base + PCIE_CORE_RESET); | ||
615 | writel(1, elbi_base + PCIE_STICKY_RESET); | ||
616 | writel(1, elbi_base + PCIE_NONSTICKY_RESET); | ||
617 | writel(1, elbi_base + PCIE_APP_INIT_RESET); | ||
618 | writel(0, elbi_base + PCIE_APP_INIT_RESET); | ||
619 | writel(1, purple_base + PCIE_PHY_MAC_RESET); | ||
620 | } | ||
621 | |||
622 | static void exynos_pcie_assert_phy_reset(struct pcie_port *pp) | ||
623 | { | ||
624 | void __iomem *purple_base = pp->purple_base; | ||
625 | |||
626 | writel(0, purple_base + PCIE_PHY_MAC_RESET); | ||
627 | writel(1, purple_base + PCIE_PHY_GLOBAL_RESET); | ||
628 | } | ||
629 | |||
630 | static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp) | ||
631 | { | ||
632 | void __iomem *elbi_base = pp->elbi_base; | ||
633 | void __iomem *purple_base = pp->purple_base; | ||
634 | |||
635 | writel(0, purple_base + PCIE_PHY_GLOBAL_RESET); | ||
636 | writel(1, elbi_base + PCIE_PWR_RESET); | ||
637 | writel(0, purple_base + PCIE_PHY_COMMON_RESET); | ||
638 | writel(0, purple_base + PCIE_PHY_CMN_REG); | ||
639 | writel(0, purple_base + PCIE_PHY_TRSVREG_RESET); | ||
640 | writel(0, purple_base + PCIE_PHY_TRSV_RESET); | ||
641 | } | ||
642 | |||
643 | static void exynos_pcie_init_phy(struct pcie_port *pp) | ||
644 | { | ||
645 | void __iomem *phy_base = pp->phy_base; | ||
646 | |||
647 | /* DCC feedback control off */ | ||
648 | writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK); | ||
649 | |||
650 | /* set TX/RX impedance */ | ||
651 | writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE); | ||
652 | |||
653 | /* set 50Mhz PHY clock */ | ||
654 | writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0); | ||
655 | writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1); | ||
656 | |||
657 | /* set TX Differential output for lane 0 */ | ||
658 | writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL); | ||
659 | |||
660 | /* set TX Pre-emphasis Level Control for lane 0 to minimum */ | ||
661 | writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL); | ||
662 | |||
663 | /* set RX clock and data recovery bandwidth */ | ||
664 | writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS); | ||
665 | writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR); | ||
666 | writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR); | ||
667 | writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR); | ||
668 | writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR); | ||
669 | |||
670 | /* change TX Pre-emphasis Level Control for lanes */ | ||
671 | writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL); | ||
672 | writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL); | ||
673 | writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL); | ||
674 | writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL); | ||
675 | |||
676 | /* set LVCC */ | ||
677 | writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC); | ||
678 | writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC); | ||
679 | writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC); | ||
680 | writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC); | ||
681 | } | ||
682 | |||
683 | static void exynos_pcie_assert_reset(struct pcie_port *pp) | ||
684 | { | ||
685 | if (pp->reset_gpio >= 0) | ||
686 | devm_gpio_request_one(pp->dev, pp->reset_gpio, | ||
687 | GPIOF_OUT_INIT_HIGH, "RESET"); | ||
688 | return; | ||
689 | } | ||
690 | |||
691 | static int exynos_pcie_establish_link(struct pcie_port *pp) | ||
692 | { | ||
693 | u32 val; | ||
694 | int count = 0; | ||
695 | void __iomem *elbi_base = pp->elbi_base; | ||
696 | void __iomem *purple_base = pp->purple_base; | ||
697 | void __iomem *phy_base = pp->phy_base; | ||
698 | |||
699 | if (exynos_pcie_link_up(pp)) { | ||
700 | dev_err(pp->dev, "Link already up\n"); | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | /* assert reset signals */ | ||
705 | exynos_pcie_assert_core_reset(pp); | ||
706 | exynos_pcie_assert_phy_reset(pp); | ||
707 | |||
708 | /* de-assert phy reset */ | ||
709 | exynos_pcie_deassert_phy_reset(pp); | ||
710 | |||
711 | /* initialize phy */ | ||
712 | exynos_pcie_init_phy(pp); | ||
713 | |||
714 | /* pulse for common reset */ | ||
715 | writel(1, purple_base + PCIE_PHY_COMMON_RESET); | ||
716 | udelay(500); | ||
717 | writel(0, purple_base + PCIE_PHY_COMMON_RESET); | ||
718 | |||
719 | /* de-assert core reset */ | ||
720 | exynos_pcie_deassert_core_reset(pp); | ||
721 | |||
722 | /* setup root complex */ | ||
723 | exynos_pcie_setup_rc(pp); | ||
724 | |||
725 | /* assert reset signal */ | ||
726 | exynos_pcie_assert_reset(pp); | ||
727 | |||
728 | /* assert LTSSM enable */ | ||
729 | writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE); | ||
730 | |||
731 | /* check if the link is up or not */ | ||
732 | while (!exynos_pcie_link_up(pp)) { | ||
733 | mdelay(100); | ||
734 | count++; | ||
735 | if (count == 10) { | ||
736 | while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) { | ||
737 | val = readl(purple_base + PCIE_PHY_PLL_LOCKED); | ||
738 | dev_info(pp->dev, "PLL Locked: 0x%x\n", val); | ||
739 | } | ||
740 | dev_err(pp->dev, "PCIe Link Fail\n"); | ||
741 | return -EINVAL; | ||
742 | } | ||
743 | } | ||
744 | |||
745 | dev_info(pp->dev, "Link up\n"); | ||
746 | |||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) | ||
751 | { | ||
752 | u32 val; | ||
753 | void __iomem *elbi_base = pp->elbi_base; | ||
754 | |||
755 | val = readl(elbi_base + PCIE_IRQ_PULSE); | ||
756 | writel(val, elbi_base + PCIE_IRQ_PULSE); | ||
757 | return; | ||
758 | } | ||
759 | |||
760 | static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp) | ||
761 | { | ||
762 | u32 val; | ||
763 | void __iomem *elbi_base = pp->elbi_base; | ||
764 | |||
765 | /* enable INTX interrupt */ | ||
766 | val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | | ||
767 | IRQ_INTC_ASSERT | IRQ_INTD_ASSERT, | ||
768 | writel(val, elbi_base + PCIE_IRQ_EN_PULSE); | ||
769 | return; | ||
770 | } | ||
771 | |||
772 | static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) | ||
773 | { | ||
774 | struct pcie_port *pp = arg; | ||
775 | |||
776 | exynos_pcie_clear_irq_pulse(pp); | ||
777 | return IRQ_HANDLED; | ||
778 | } | ||
779 | |||
780 | static void exynos_pcie_enable_interrupts(struct pcie_port *pp) | ||
781 | { | ||
782 | exynos_pcie_enable_irq_pulse(pp); | ||
783 | return; | ||
784 | } | ||
785 | |||
786 | static void exynos_pcie_host_init(struct pcie_port *pp) | ||
787 | { | ||
788 | struct pcie_port_info *config = &pp->config; | ||
789 | u32 val; | ||
790 | |||
791 | /* Keep first 64K for IO */ | ||
792 | pp->cfg0_base = pp->cfg.start; | ||
793 | pp->cfg1_base = pp->cfg.start + config->cfg0_size; | ||
794 | pp->io_base = pp->io.start; | ||
795 | pp->mem_base = pp->mem.start; | ||
796 | |||
797 | /* enable link */ | ||
798 | exynos_pcie_establish_link(pp); | ||
799 | |||
800 | exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); | ||
801 | |||
802 | /* program correct class for RC */ | ||
803 | exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); | ||
804 | |||
805 | exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); | ||
806 | val |= PORT_LOGIC_SPEED_CHANGE; | ||
807 | exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); | ||
808 | |||
809 | exynos_pcie_enable_interrupts(pp); | ||
810 | } | ||
811 | |||
812 | static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) | ||
813 | { | ||
814 | struct resource *elbi_base; | ||
815 | struct resource *phy_base; | ||
816 | struct resource *purple_base; | ||
817 | int ret; | ||
818 | |||
819 | elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
820 | if (!elbi_base) { | ||
821 | dev_err(&pdev->dev, "couldn't get elbi base resource\n"); | ||
822 | return -EINVAL; | ||
823 | } | ||
824 | pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base); | ||
825 | if (IS_ERR(pp->elbi_base)) | ||
826 | return PTR_ERR(pp->elbi_base); | ||
827 | |||
828 | phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
829 | if (!phy_base) { | ||
830 | dev_err(&pdev->dev, "couldn't get phy base resource\n"); | ||
831 | return -EINVAL; | ||
832 | } | ||
833 | pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base); | ||
834 | if (IS_ERR(pp->phy_base)) | ||
835 | return PTR_ERR(pp->phy_base); | ||
836 | |||
837 | purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
838 | if (!purple_base) { | ||
839 | dev_err(&pdev->dev, "couldn't get purple base resource\n"); | ||
840 | return -EINVAL; | ||
841 | } | ||
842 | pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base); | ||
843 | if (IS_ERR(pp->purple_base)) | ||
844 | return PTR_ERR(pp->purple_base); | ||
845 | |||
846 | pp->irq = platform_get_irq(pdev, 1); | ||
847 | if (!pp->irq) { | ||
848 | dev_err(&pdev->dev, "failed to get irq\n"); | ||
849 | return -ENODEV; | ||
850 | } | ||
851 | ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler, | ||
852 | IRQF_SHARED, "exynos-pcie", pp); | ||
853 | if (ret) { | ||
854 | dev_err(&pdev->dev, "failed to request irq\n"); | ||
855 | return ret; | ||
856 | } | ||
857 | |||
858 | pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start, | ||
859 | resource_size(&pp->cfg)); | ||
860 | if (!pp->dbi_base) { | ||
861 | dev_err(&pdev->dev, "error with ioremap\n"); | ||
862 | return -ENOMEM; | ||
863 | } | ||
864 | |||
865 | pp->root_bus_nr = -1; | ||
866 | |||
867 | spin_lock_init(&pp->conf_lock); | ||
868 | exynos_pcie_host_init(pp); | ||
869 | pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base, | ||
870 | pp->config.cfg0_size); | ||
871 | if (!pp->va_cfg0_base) { | ||
872 | dev_err(pp->dev, "error with ioremap in function\n"); | ||
873 | return -ENOMEM; | ||
874 | } | ||
875 | pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base, | ||
876 | pp->config.cfg1_size); | ||
877 | if (!pp->va_cfg1_base) { | ||
878 | dev_err(pp->dev, "error with ioremap\n"); | ||
879 | return -ENOMEM; | ||
880 | } | ||
881 | |||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static int __init exynos_pcie_probe(struct platform_device *pdev) | ||
886 | { | ||
887 | struct pcie_port *pp; | ||
888 | struct device_node *np = pdev->dev.of_node; | ||
889 | struct of_pci_range range; | ||
890 | struct of_pci_range_parser parser; | ||
891 | int ret; | ||
892 | |||
893 | pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL); | ||
894 | if (!pp) { | ||
895 | dev_err(&pdev->dev, "no memory for pcie port\n"); | ||
896 | return -ENOMEM; | ||
897 | } | ||
898 | |||
899 | pp->dev = &pdev->dev; | ||
900 | |||
901 | if (of_pci_range_parser_init(&parser, np)) { | ||
902 | dev_err(&pdev->dev, "missing ranges property\n"); | ||
903 | return -EINVAL; | ||
904 | } | ||
905 | |||
906 | /* Get the I/O and memory ranges from DT */ | ||
907 | for_each_of_pci_range(&parser, &range) { | ||
908 | unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; | ||
909 | if (restype == IORESOURCE_IO) { | ||
910 | of_pci_range_to_resource(&range, np, &pp->io); | ||
911 | pp->io.name = "I/O"; | ||
912 | pp->io.start = max_t(resource_size_t, | ||
913 | PCIBIOS_MIN_IO, | ||
914 | range.pci_addr + global_io_offset); | ||
915 | pp->io.end = min_t(resource_size_t, | ||
916 | IO_SPACE_LIMIT, | ||
917 | range.pci_addr + range.size | ||
918 | + global_io_offset); | ||
919 | pp->config.io_size = resource_size(&pp->io); | ||
920 | pp->config.io_bus_addr = range.pci_addr; | ||
921 | } | ||
922 | if (restype == IORESOURCE_MEM) { | ||
923 | of_pci_range_to_resource(&range, np, &pp->mem); | ||
924 | pp->mem.name = "MEM"; | ||
925 | pp->config.mem_size = resource_size(&pp->mem); | ||
926 | pp->config.mem_bus_addr = range.pci_addr; | ||
927 | } | ||
928 | if (restype == 0) { | ||
929 | of_pci_range_to_resource(&range, np, &pp->cfg); | ||
930 | pp->config.cfg0_size = resource_size(&pp->cfg)/2; | ||
931 | pp->config.cfg1_size = resource_size(&pp->cfg)/2; | ||
932 | } | ||
933 | } | ||
934 | |||
935 | pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); | ||
936 | |||
937 | pp->clk = devm_clk_get(&pdev->dev, "pcie"); | ||
938 | if (IS_ERR(pp->clk)) { | ||
939 | dev_err(&pdev->dev, "Failed to get pcie rc clock\n"); | ||
940 | return PTR_ERR(pp->clk); | ||
941 | } | ||
942 | ret = clk_prepare_enable(pp->clk); | ||
943 | if (ret) | ||
944 | return ret; | ||
945 | |||
946 | pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); | ||
947 | if (IS_ERR(pp->bus_clk)) { | ||
948 | dev_err(&pdev->dev, "Failed to get pcie bus clock\n"); | ||
949 | ret = PTR_ERR(pp->bus_clk); | ||
950 | goto fail_clk; | ||
951 | } | ||
952 | ret = clk_prepare_enable(pp->bus_clk); | ||
953 | if (ret) | ||
954 | goto fail_clk; | ||
955 | |||
956 | ret = add_pcie_port(pp, pdev); | ||
957 | if (ret < 0) | ||
958 | goto fail_bus_clk; | ||
959 | |||
960 | pp->controller = exynos_pci.nr_controllers; | ||
961 | exynos_pci.nr_controllers = 1; | ||
962 | exynos_pci.private_data = (void **)&pp; | ||
963 | |||
964 | pci_common_init(&exynos_pci); | ||
965 | pci_assign_unassigned_resources(); | ||
966 | #ifdef CONFIG_PCI_DOMAINS | ||
967 | exynos_pci.domain++; | ||
968 | #endif | ||
969 | |||
970 | platform_set_drvdata(pdev, pp); | ||
971 | return 0; | ||
972 | |||
973 | fail_bus_clk: | ||
974 | clk_disable_unprepare(pp->bus_clk); | ||
975 | fail_clk: | ||
976 | clk_disable_unprepare(pp->clk); | ||
977 | return ret; | ||
978 | } | ||
979 | |||
980 | static int __exit exynos_pcie_remove(struct platform_device *pdev) | ||
981 | { | ||
982 | struct pcie_port *pp = platform_get_drvdata(pdev); | ||
983 | |||
984 | clk_disable_unprepare(pp->bus_clk); | ||
985 | clk_disable_unprepare(pp->clk); | ||
986 | |||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | static const struct of_device_id exynos_pcie_of_match[] = { | ||
991 | { .compatible = "samsung,exynos5440-pcie", }, | ||
992 | {}, | ||
993 | }; | ||
994 | MODULE_DEVICE_TABLE(of, exynos_pcie_of_match); | ||
995 | |||
996 | static struct platform_driver exynos_pcie_driver = { | ||
997 | .remove = __exit_p(exynos_pcie_remove), | ||
998 | .driver = { | ||
999 | .name = "exynos-pcie", | ||
1000 | .owner = THIS_MODULE, | ||
1001 | .of_match_table = of_match_ptr(exynos_pcie_of_match), | ||
1002 | }, | ||
1003 | }; | ||
1004 | |||
1005 | static int exynos_pcie_abort(unsigned long addr, unsigned int fsr, | ||
1006 | struct pt_regs *regs) | ||
1007 | { | ||
1008 | unsigned long pc = instruction_pointer(regs); | ||
1009 | unsigned long instr = *(unsigned long *)pc; | ||
1010 | |||
1011 | WARN_ONCE(1, "pcie abort\n"); | ||
1012 | |||
1013 | /* | ||
1014 | * If the instruction being executed was a read, | ||
1015 | * make it look like it read all-ones. | ||
1016 | */ | ||
1017 | if ((instr & 0x0c100000) == 0x04100000) { | ||
1018 | int reg = (instr >> 12) & 15; | ||
1019 | unsigned long val; | ||
1020 | |||
1021 | if (instr & 0x00400000) | ||
1022 | val = 255; | ||
1023 | else | ||
1024 | val = -1; | ||
1025 | |||
1026 | regs->uregs[reg] = val; | ||
1027 | regs->ARM_pc += 4; | ||
1028 | return 0; | ||
1029 | } | ||
1030 | |||
1031 | if ((instr & 0x0e100090) == 0x00100090) { | ||
1032 | int reg = (instr >> 12) & 15; | ||
1033 | |||
1034 | regs->uregs[reg] = -1; | ||
1035 | regs->ARM_pc += 4; | ||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | return 1; | ||
1040 | } | ||
1041 | |||
1042 | /* Exynos PCIe driver does not allow module unload */ | ||
1043 | |||
1044 | static int __init pcie_init(void) | ||
1045 | { | ||
1046 | hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0, | ||
1047 | "imprecise external abort"); | ||
1048 | |||
1049 | platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); | ||
1050 | |||
1051 | return 0; | ||
1052 | } | 586 | } |
1053 | subsys_initcall(pcie_init); | ||
1054 | 587 | ||
1055 | MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); | 588 | MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); |
1056 | MODULE_DESCRIPTION("Samsung PCIe host controller driver"); | 589 | MODULE_DESCRIPTION("Designware PCIe host controller driver"); |
1057 | MODULE_LICENSE("GPL v2"); | 590 | MODULE_LICENSE("GPL v2"); |
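Note on the lane handling introduced above: dw_pcie_setup_rc() now keys the link-width programming off pp->lanes (declared in the header below) instead of a hard-coded four-lane value, and the switch statements have no default case, so an unexpected value would leave the link-mode field cleared. A minimal sketch of how that lane count might be obtained from a "num-lanes" device-tree property follows; the helper name, the property name, and where this parsing actually lives in the series are assumptions for illustration, not part of this diff.

	/*
	 * Sketch only: populate pp->lanes from DT before dw_pcie_setup_rc()
	 * runs.  plat_pcie_parse_lanes() and "num-lanes" are hypothetical.
	 */
	#include <linux/of.h>
	#include <linux/device.h>

	#include "pcie-designware.h"

	static int plat_pcie_parse_lanes(struct device_node *np, struct pcie_port *pp)
	{
		if (of_property_read_u32(np, "num-lanes", &pp->lanes))
			pp->lanes = 1;		/* default to a x1 link */

		/* only x1, x2 and x4 are handled by dw_pcie_setup_rc() */
		if (pp->lanes != 1 && pp->lanes != 2 && pp->lanes != 4) {
			dev_err(pp->dev, "unsupported lane count %u\n", pp->lanes);
			return -EINVAL;
		}

		return 0;
	}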
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h new file mode 100644 index 000000000000..133820f1da97 --- /dev/null +++ b/drivers/pci/host/pcie-designware.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Synopsys Designware PCIe host controller driver | ||
3 | * | ||
4 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
5 | * http://www.samsung.com | ||
6 | * | ||
7 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | struct pcie_port_info { | ||
15 | u32 cfg0_size; | ||
16 | u32 cfg1_size; | ||
17 | u32 io_size; | ||
18 | u32 mem_size; | ||
19 | phys_addr_t io_bus_addr; | ||
20 | phys_addr_t mem_bus_addr; | ||
21 | }; | ||
22 | |||
23 | struct pcie_port { | ||
24 | struct device *dev; | ||
25 | u8 root_bus_nr; | ||
26 | void __iomem *dbi_base; | ||
27 | u64 cfg0_base; | ||
28 | void __iomem *va_cfg0_base; | ||
29 | u64 cfg1_base; | ||
30 | void __iomem *va_cfg1_base; | ||
31 | u64 io_base; | ||
32 | u64 mem_base; | ||
33 | spinlock_t conf_lock; | ||
34 | struct resource cfg; | ||
35 | struct resource io; | ||
36 | struct resource mem; | ||
37 | struct pcie_port_info config; | ||
38 | int irq; | ||
39 | u32 lanes; | ||
40 | struct pcie_host_ops *ops; | ||
41 | }; | ||
42 | |||
43 | struct pcie_host_ops { | ||
44 | void (*readl_rc)(struct pcie_port *pp, | ||
45 | void __iomem *dbi_base, u32 *val); | ||
46 | void (*writel_rc)(struct pcie_port *pp, | ||
47 | u32 val, void __iomem *dbi_base); | ||
48 | int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); | ||
49 | int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); | ||
50 | int (*link_up)(struct pcie_port *pp); | ||
51 | void (*host_init)(struct pcie_port *pp); | ||
52 | }; | ||
53 | |||
54 | extern unsigned long global_io_offset; | ||
55 | |||
56 | int cfg_read(void __iomem *addr, int where, int size, u32 *val); | ||
57 | int cfg_write(void __iomem *addr, int where, int size, u32 val); | ||
58 | int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val); | ||
59 | int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val); | ||
60 | int dw_pcie_link_up(struct pcie_port *pp); | ||
61 | void dw_pcie_setup_rc(struct pcie_port *pp); | ||
62 | int dw_pcie_host_init(struct pcie_port *pp); | ||
63 | int dw_pcie_setup(int nr, struct pci_sys_data *sys); | ||
64 | struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys); | ||
65 | int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); | ||
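Taken together, the declarations above define the contract between the shared DesignWare core and its platform glue: the glue embeds a struct pcie_port, supplies a struct pcie_host_ops, and hands the port to dw_pcie_host_init(). The sketch below shows that shape under stated assumptions; every plat_* name is hypothetical and the SoC-specific bodies are stubbed out.

	#include <linux/platform_device.h>
	#include <linux/slab.h>

	#include "pcie-designware.h"

	/* Sketch only: hypothetical platform glue built on the header above. */

	static int plat_pcie_link_up(struct pcie_port *pp)
	{
		/* return nonzero once a SoC-specific status register reports
		 * link-up; stubbed to 0 here */
		return 0;
	}

	static void plat_pcie_host_init(struct pcie_port *pp)
	{
		/* SoC-specific resets and PHY bring-up would go here, then
		 * the common root-complex setup */
		dw_pcie_setup_rc(pp);
	}

	static struct pcie_host_ops plat_pcie_host_ops = {
		.link_up	= plat_pcie_link_up,
		.host_init	= plat_pcie_host_init,
	};

	static int plat_pcie_probe(struct platform_device *pdev)
	{
		struct pcie_port *pp;

		pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
		if (!pp)
			return -ENOMEM;

		pp->dev = &pdev->dev;
		pp->ops = &plat_pcie_host_ops;

		/* a real glue driver also fills in dbi_base, irq and the
		 * remaining pcie_port fields before this hand-off */
		return dw_pcie_host_init(pp);
	}

The split keeps bus enumeration, config-space accessors and hw_pci registration in the shared file, while resets, PHY programming and interrupt wiring stay with the SoC driver that knows the extra register blocks.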