author     Shawn Lin <shawn.lin@rock-chips.com>   2018-05-30 21:12:37 -0400
committer  Bjorn Helgaas <bhelgaas@google.com>    2018-06-08 08:50:11 -0400
commit     6e0832fa432ec99c94caee733c8f5851cf85560b (patch)
tree       c4326f9e2d8ff1a6cb17e959fc5268c9e577ca94 /drivers/pci/controller/dwc/pci-keystone.c
parent     3a3869f1c443383ef8354ffa0e5fb8df65d8b549 (diff)
PCI: Collect all native drivers under drivers/pci/controller/
Native PCI drivers for root complex devices were originally all in drivers/pci/host/. Some of these devices can also be operated in endpoint mode. Drivers for endpoint mode didn't seem to fit in the "host" directory, so we put both the root complex and endpoint drivers in per-device directories, e.g., drivers/pci/dwc/, drivers/pci/cadence/, etc.

These per-device directories contain trivial Kconfig files and Makefiles and clutter drivers/pci/. Make a new drivers/pci/controller/ directory and collect all the device-specific drivers there. No functional change intended.

Link: https://lkml.kernel.org/r/1520304202-232891-1-git-send-email-shawn.lin@rock-chips.com
Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
[bhelgaas: changelog]
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
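For readers following the reorganization: the practical effect is on the kbuild wiring, where the top-level drivers/pci/ files now descend into a single controller/ subtree instead of a list of per-device directories. Below is a minimal sketch of what that wiring looks like, assuming the usual kernel Kconfig/Makefile conventions; the exact lines belong to the rest of this commit, outside the one file shown below, and may differ in detail.

    # drivers/pci/Kconfig -- pull in the collected controller drivers (sketch)
    source "drivers/pci/controller/Kconfig"

    # drivers/pci/Makefile -- descend into the new directory (sketch)
    obj-y += controller/

    # drivers/pci/controller/Makefile -- per-device subdirectories, e.g. the
    # DesignWare-based drivers (including this Keystone driver) under dwc/
    obj-y += dwc/

Since no functional change is intended, the per-driver config symbols and obj-$(CONFIG_...) entries themselves stay the same; only their location in the tree moves.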
Diffstat (limited to 'drivers/pci/controller/dwc/pci-keystone.c')
-rw-r--r--   drivers/pci/controller/dwc/pci-keystone.c   457
1 file changed, 457 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
new file mode 100644
index 000000000000..3722a5f31e5e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -0,0 +1,457 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * PCIe host controller driver for Texas Instruments Keystone SoCs
4 *
5 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
6 * http://www.ti.com
7 *
8 * Author: Murali Karicheri <m-karicheri2@ti.com>
9 * Implementation based on pci-exynos.c and pcie-designware.c
10 */
11
12#include <linux/irqchip/chained_irq.h>
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/interrupt.h>
16#include <linux/irqdomain.h>
17#include <linux/init.h>
18#include <linux/msi.h>
19#include <linux/of_irq.h>
20#include <linux/of.h>
21#include <linux/of_pci.h>
22#include <linux/platform_device.h>
23#include <linux/phy/phy.h>
24#include <linux/resource.h>
25#include <linux/signal.h>
26
27#include "pcie-designware.h"
28#include "pci-keystone.h"
29
30#define DRIVER_NAME "keystone-pcie"
31
32/* DEV_STAT_CTRL */
33#define PCIE_CAP_BASE 0x70
34
35/* PCIE controller device IDs */
36#define PCIE_RC_K2HK 0xb008
37#define PCIE_RC_K2E 0xb009
38#define PCIE_RC_K2L 0xb00a
39
40#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
41
42static void quirk_limit_mrrs(struct pci_dev *dev)
43{
44 struct pci_bus *bus = dev->bus;
45 struct pci_dev *bridge = bus->self;
46 static const struct pci_device_id rc_pci_devids[] = {
47 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
48 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
49 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
50 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
51 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
52 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
53 { 0, },
54 };
55
56 if (pci_is_root_bus(bus))
57 return;
58
59 /* look for the host bridge */
60 while (!pci_is_root_bus(bus)) {
61 bridge = bus->self;
62 bus = bus->parent;
63 }
64
65 if (bridge) {
66 /*
67 * Keystone PCI controller has a h/w limitation of
68 * 256 bytes maximum read request size. It can't handle
69 * anything higher than this. So force this limit on
70 * all downstream devices.
71 */
72 if (pci_match_id(rc_pci_devids, bridge)) {
73 if (pcie_get_readrq(dev) > 256) {
74 dev_info(&dev->dev, "limiting MRRS to 256\n");
75 pcie_set_readrq(dev, 256);
76 }
77 }
78 }
79}
80DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
81
82static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
83{
84 struct dw_pcie *pci = ks_pcie->pci;
85 struct pcie_port *pp = &pci->pp;
86 struct device *dev = pci->dev;
87 unsigned int retries;
88
89 dw_pcie_setup_rc(pp);
90
91 if (dw_pcie_link_up(pci)) {
92 dev_info(dev, "Link already up\n");
93 return 0;
94 }
95
96 /* check if the link is up or not */
97 for (retries = 0; retries < 5; retries++) {
98 ks_dw_pcie_initiate_link_train(ks_pcie);
99 if (!dw_pcie_wait_for_link(pci))
100 return 0;
101 }
102
103 dev_err(dev, "phy link never came up\n");
104 return -ETIMEDOUT;
105}
106
107static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
108{
109 unsigned int irq = irq_desc_get_irq(desc);
110 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
111 u32 offset = irq - ks_pcie->msi_host_irqs[0];
112 struct dw_pcie *pci = ks_pcie->pci;
113 struct device *dev = pci->dev;
114 struct irq_chip *chip = irq_desc_get_chip(desc);
115
116 dev_dbg(dev, "%s, irq %d\n", __func__, irq);
117
118 /*
119 * The chained irq handler installation would have replaced normal
120 * interrupt driver handler so we need to take care of mask/unmask and
121 * ack operation.
122 */
123 chained_irq_enter(chip, desc);
124 ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
125 chained_irq_exit(chip, desc);
126}
127
128/**
129 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
130 * @irq: IRQ line for legacy interrupts
131 * @desc: Pointer to irq descriptor
132 *
133 * Traverse through pending legacy interrupts and invoke handler for each. Also
134 * takes care of interrupt controller level mask/ack operation.
135 */
136static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
137{
138 unsigned int irq = irq_desc_get_irq(desc);
139 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
140 struct dw_pcie *pci = ks_pcie->pci;
141 struct device *dev = pci->dev;
142 u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
143 struct irq_chip *chip = irq_desc_get_chip(desc);
144
145 dev_dbg(dev, ": Handling legacy irq %d\n", irq);
146
147 /*
148 * The chained irq handler installation would have replaced normal
149 * interrupt driver handler so we need to take care of mask/unmask and
150 * ack operation.
151 */
152 chained_irq_enter(chip, desc);
153 ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
154 chained_irq_exit(chip, desc);
155}
156
157static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
158 char *controller, int *num_irqs)
159{
160 int temp, max_host_irqs, legacy = 1, *host_irqs;
161 struct device *dev = ks_pcie->pci->dev;
162 struct device_node *np_pcie = dev->of_node, **np_temp;
163
164 if (!strcmp(controller, "msi-interrupt-controller"))
165 legacy = 0;
166
167 if (legacy) {
168 np_temp = &ks_pcie->legacy_intc_np;
169 max_host_irqs = PCI_NUM_INTX;
170 host_irqs = &ks_pcie->legacy_host_irqs[0];
171 } else {
172 np_temp = &ks_pcie->msi_intc_np;
173 max_host_irqs = MAX_MSI_HOST_IRQS;
174 host_irqs = &ks_pcie->msi_host_irqs[0];
175 }
176
177 /* interrupt controller is in a child node */
178 *np_temp = of_get_child_by_name(np_pcie, controller);
179 if (!(*np_temp)) {
180 dev_err(dev, "Node for %s is absent\n", controller);
181 return -EINVAL;
182 }
183
184 temp = of_irq_count(*np_temp);
185 if (!temp) {
186 dev_err(dev, "No IRQ entries in %s\n", controller);
187 of_node_put(*np_temp);
188 return -EINVAL;
189 }
190
191 if (temp > max_host_irqs)
192 dev_warn(dev, "Too many %s interrupts defined %u\n",
193 (legacy ? "legacy" : "MSI"), temp);
194
195 /*
 196 * support up to max_host_irqs. In DT, from index 0 to 3 (legacy) or 0 to
197 * 7 (MSI)
198 */
199 for (temp = 0; temp < max_host_irqs; temp++) {
200 host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
201 if (!host_irqs[temp])
202 break;
203 }
204
205 of_node_put(*np_temp);
206
207 if (temp) {
208 *num_irqs = temp;
209 return 0;
210 }
211
212 return -EINVAL;
213}
214
215static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
216{
217 int i;
218
219 /* Legacy IRQ */
220 for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
221 irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
222 ks_pcie_legacy_irq_handler,
223 ks_pcie);
224 }
225 ks_dw_pcie_enable_legacy_irqs(ks_pcie);
226
227 /* MSI IRQ */
228 if (IS_ENABLED(CONFIG_PCI_MSI)) {
229 for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
230 irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
231 ks_pcie_msi_irq_handler,
232 ks_pcie);
233 }
234 }
235
236 if (ks_pcie->error_irq > 0)
237 ks_dw_pcie_enable_error_irq(ks_pcie);
238}
239
240/*
241 * When a PCI device does not exist during config cycles, keystone host gets a
242 * bus error instead of returning 0xffffffff. This handler always returns 0
243 * for this kind of faults.
244 */
245static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
246 struct pt_regs *regs)
247{
248 unsigned long instr = *(unsigned long *) instruction_pointer(regs);
249
250 if ((instr & 0x0e100090) == 0x00100090) {
251 int reg = (instr >> 12) & 15;
252
253 regs->uregs[reg] = -1;
254 regs->ARM_pc += 4;
255 }
256
257 return 0;
258}
259
260static int __init ks_pcie_host_init(struct pcie_port *pp)
261{
262 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
263 struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
264 u32 val;
265
266 ks_pcie_establish_link(ks_pcie);
267 ks_dw_pcie_setup_rc_app_regs(ks_pcie);
268 ks_pcie_setup_interrupts(ks_pcie);
269 writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
270 pci->dbi_base + PCI_IO_BASE);
271
272 /* update the Vendor ID */
273 writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);
274
275 /* update the DEV_STAT_CTRL to publish right mrrs */
276 val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
277 val &= ~PCI_EXP_DEVCTL_READRQ;
278 /* set the mrrs to 256 bytes */
279 val |= BIT(12);
280 writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
281
282 /*
283 * PCIe access errors that result into OCP errors are caught by ARM as
284 * "External aborts"
285 */
286 hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
287 "Asynchronous external abort");
288
289 return 0;
290}
291
292static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
293 .rd_other_conf = ks_dw_pcie_rd_other_conf,
294 .wr_other_conf = ks_dw_pcie_wr_other_conf,
295 .host_init = ks_pcie_host_init,
296 .msi_set_irq = ks_dw_pcie_msi_set_irq,
297 .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
298 .get_msi_addr = ks_dw_pcie_get_msi_addr,
299 .msi_host_init = ks_dw_pcie_msi_host_init,
300 .msi_irq_ack = ks_dw_pcie_msi_irq_ack,
301 .scan_bus = ks_dw_pcie_v3_65_scan_bus,
302};
303
304static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
305{
306 struct keystone_pcie *ks_pcie = priv;
307
308 return ks_dw_pcie_handle_error_irq(ks_pcie);
309}
310
311static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
312 struct platform_device *pdev)
313{
314 struct dw_pcie *pci = ks_pcie->pci;
315 struct pcie_port *pp = &pci->pp;
316 struct device *dev = &pdev->dev;
317 int ret;
318
319 ret = ks_pcie_get_irq_controller_info(ks_pcie,
320 "legacy-interrupt-controller",
321 &ks_pcie->num_legacy_host_irqs);
322 if (ret)
323 return ret;
324
325 if (IS_ENABLED(CONFIG_PCI_MSI)) {
326 ret = ks_pcie_get_irq_controller_info(ks_pcie,
327 "msi-interrupt-controller",
328 &ks_pcie->num_msi_host_irqs);
329 if (ret)
330 return ret;
331 }
332
333 /*
334 * Index 0 is the platform interrupt for error interrupt
335 * from RC. This is optional.
336 */
337 ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
338 if (ks_pcie->error_irq <= 0)
339 dev_info(dev, "no error IRQ defined\n");
340 else {
341 ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
342 IRQF_SHARED, "pcie-error-irq", ks_pcie);
343 if (ret < 0) {
344 dev_err(dev, "failed to request error IRQ %d\n",
345 ks_pcie->error_irq);
346 return ret;
347 }
348 }
349
350 pp->root_bus_nr = -1;
351 pp->ops = &keystone_pcie_host_ops;
352 ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
353 if (ret) {
354 dev_err(dev, "failed to initialize host\n");
355 return ret;
356 }
357
358 return 0;
359}
360
361static const struct of_device_id ks_pcie_of_match[] = {
362 {
363 .type = "pci",
364 .compatible = "ti,keystone-pcie",
365 },
366 { },
367};
368
369static const struct dw_pcie_ops dw_pcie_ops = {
370 .link_up = ks_dw_pcie_link_up,
371};
372
373static int __exit ks_pcie_remove(struct platform_device *pdev)
374{
375 struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
376
377 clk_disable_unprepare(ks_pcie->clk);
378
379 return 0;
380}
381
382static int __init ks_pcie_probe(struct platform_device *pdev)
383{
384 struct device *dev = &pdev->dev;
385 struct dw_pcie *pci;
386 struct keystone_pcie *ks_pcie;
387 struct resource *res;
388 void __iomem *reg_p;
389 struct phy *phy;
390 int ret;
391
392 ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
393 if (!ks_pcie)
394 return -ENOMEM;
395
396 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
397 if (!pci)
398 return -ENOMEM;
399
400 pci->dev = dev;
401 pci->ops = &dw_pcie_ops;
402
403 ks_pcie->pci = pci;
404
405 /* initialize SerDes Phy if present */
406 phy = devm_phy_get(dev, "pcie-phy");
407 if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
408 return PTR_ERR(phy);
409
410 if (!IS_ERR_OR_NULL(phy)) {
411 ret = phy_init(phy);
412 if (ret < 0)
413 return ret;
414 }
415
416 /* index 2 is to read PCI DEVICE_ID */
417 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
418 reg_p = devm_ioremap_resource(dev, res);
419 if (IS_ERR(reg_p))
420 return PTR_ERR(reg_p);
421 ks_pcie->device_id = readl(reg_p) >> 16;
422 devm_iounmap(dev, reg_p);
423 devm_release_mem_region(dev, res->start, resource_size(res));
424
425 ks_pcie->np = dev->of_node;
426 platform_set_drvdata(pdev, ks_pcie);
427 ks_pcie->clk = devm_clk_get(dev, "pcie");
428 if (IS_ERR(ks_pcie->clk)) {
429 dev_err(dev, "Failed to get pcie rc clock\n");
430 return PTR_ERR(ks_pcie->clk);
431 }
432 ret = clk_prepare_enable(ks_pcie->clk);
433 if (ret)
434 return ret;
435
436 platform_set_drvdata(pdev, ks_pcie);
437
438 ret = ks_add_pcie_port(ks_pcie, pdev);
439 if (ret < 0)
440 goto fail_clk;
441
442 return 0;
443fail_clk:
444 clk_disable_unprepare(ks_pcie->clk);
445
446 return ret;
447}
448
449static struct platform_driver ks_pcie_driver __refdata = {
450 .probe = ks_pcie_probe,
451 .remove = __exit_p(ks_pcie_remove),
452 .driver = {
453 .name = "keystone-pcie",
454 .of_match_table = of_match_ptr(ks_pcie_of_match),
455 },
456};
457builtin_platform_driver(ks_pcie_driver);