aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/pci/host
diff options
context:
space:
mode:
authorKevin Hilman <khilman@linaro.org>2013-08-21 13:16:55 -0400
committerKevin Hilman <khilman@linaro.org>2013-08-21 13:17:18 -0400
commitbfa664f21b0357f2ad9cdf519f594ece36ec8f64 (patch)
treef377028eba58633d917d65c1141977b2e8ca9529 /drivers/pci/host
parent5515d9981f5f30e82d096921f86ba016911c9ea8 (diff)
parentb4f173752a56187bd55752b0474429202f2ab1d3 (diff)
Merge tag 'tegra-for-3.12-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra into next/soc
From: Stephen Warren: ARM: tegra: core SoC enhancements for 3.12 This branch includes a number of enhancements to core SoC support for Tegra devices. The major new features are: * Adds a new CPU-power-gated cpuidle state for Tegra114. * Adds initial system suspend support for Tegra114, initially supporting just CPU-power-gating during suspend. * Adds "LP1" suspend mode support for all of Tegra20/30/114. This mode both gates CPU power, and places the DRAM into self-refresh mode. * A new DT-driven PCIe driver to Tegra20/30. The driver is also moved from arch/arm/mach-tegra/ to drivers/pci/host/. The PCIe driver work depends on the following tag from Thomas Petazzoni: git://git.infradead.org/linux-mvebu.git mis-3.12.2 ... which is merged into the middle of this pull request. * tag 'tegra-for-3.12-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra: (33 commits) ARM: tegra: disable LP2 cpuidle state if PCIe is enabled MAINTAINERS: Add myself as Tegra PCIe maintainer PCI: tegra: set up PADS_REFCLK_CFG1 PCI: tegra: Add Tegra 30 PCIe support PCI: tegra: Move PCIe driver to drivers/pci/host PCI: msi: add default MSI operations for !HAVE_GENERIC_HARDIRQS platforms ARM: tegra: add LP1 suspend support for Tegra114 ARM: tegra: add LP1 suspend support for Tegra20 ARM: tegra: add LP1 suspend support for Tegra30 ARM: tegra: add common LP1 suspend support clk: tegra114: add LP1 suspend/resume support ARM: tegra: config the polarity of the request of sys clock ARM: tegra: add common resume handling code for LP1 resuming ARM: pci: add ->add_bus() and ->remove_bus() hooks to hw_pci of: pci: add registry of MSI chips PCI: Introduce new MSI chip infrastructure PCI: remove ARCH_SUPPORTS_MSI kconfig option PCI: use weak functions for MSI arch-specific functions ARM: tegra: unify Tegra's Kconfig a bit more ARM: tegra: remove the limitation that Tegra114 can't support suspend ... Signed-off-by: Kevin Hilman <khilman@linaro.org>
Diffstat (limited to 'drivers/pci/host')
-rw-r--r--drivers/pci/host/Kconfig4
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-tegra.c1702
3 files changed, 1707 insertions, 0 deletions
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 1184ff6fe864..5f33746c925b 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,4 +14,8 @@ config PCI_EXYNOS
14 select PCIEPORTBUS 14 select PCIEPORTBUS
15 select PCIE_DW 15 select PCIE_DW
16 16
17config PCI_TEGRA
18 bool "NVIDIA Tegra PCIe controller"
19 depends on ARCH_TEGRA
20
17endmenu 21endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 086d8500e849..a733fb0f7856 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o 1obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
2obj-$(CONFIG_PCIE_DW) += pcie-designware.o 2obj-$(CONFIG_PCIE_DW) += pcie-designware.o
3obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
new file mode 100644
index 000000000000..7356741de36b
--- /dev/null
+++ b/drivers/pci/host/pci-tegra.c
@@ -0,0 +1,1702 @@
1/*
2 * PCIe host controller driver for Tegra SoCs
3 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */
26
27#include <linux/clk.h>
28#include <linux/clk/tegra.h>
29#include <linux/delay.h>
30#include <linux/export.h>
31#include <linux/interrupt.h>
32#include <linux/irq.h>
33#include <linux/irqdomain.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/msi.h>
37#include <linux/of_address.h>
38#include <linux/of_pci.h>
39#include <linux/of_platform.h>
40#include <linux/pci.h>
41#include <linux/platform_device.h>
42#include <linux/sizes.h>
43#include <linux/slab.h>
44#include <linux/tegra-cpuidle.h>
45#include <linux/tegra-powergate.h>
46#include <linux/vmalloc.h>
47#include <linux/regulator/consumer.h>
48
49#include <asm/mach/irq.h>
50#include <asm/mach/map.h>
51#include <asm/mach/pci.h>
52
53#define INT_PCI_MSI_NR (8 * 32)
54
55/* register definitions */
56
57#define AFI_AXI_BAR0_SZ 0x00
58#define AFI_AXI_BAR1_SZ 0x04
59#define AFI_AXI_BAR2_SZ 0x08
60#define AFI_AXI_BAR3_SZ 0x0c
61#define AFI_AXI_BAR4_SZ 0x10
62#define AFI_AXI_BAR5_SZ 0x14
63
64#define AFI_AXI_BAR0_START 0x18
65#define AFI_AXI_BAR1_START 0x1c
66#define AFI_AXI_BAR2_START 0x20
67#define AFI_AXI_BAR3_START 0x24
68#define AFI_AXI_BAR4_START 0x28
69#define AFI_AXI_BAR5_START 0x2c
70
71#define AFI_FPCI_BAR0 0x30
72#define AFI_FPCI_BAR1 0x34
73#define AFI_FPCI_BAR2 0x38
74#define AFI_FPCI_BAR3 0x3c
75#define AFI_FPCI_BAR4 0x40
76#define AFI_FPCI_BAR5 0x44
77
78#define AFI_CACHE_BAR0_SZ 0x48
79#define AFI_CACHE_BAR0_ST 0x4c
80#define AFI_CACHE_BAR1_SZ 0x50
81#define AFI_CACHE_BAR1_ST 0x54
82
83#define AFI_MSI_BAR_SZ 0x60
84#define AFI_MSI_FPCI_BAR_ST 0x64
85#define AFI_MSI_AXI_BAR_ST 0x68
86
87#define AFI_MSI_VEC0 0x6c
88#define AFI_MSI_VEC1 0x70
89#define AFI_MSI_VEC2 0x74
90#define AFI_MSI_VEC3 0x78
91#define AFI_MSI_VEC4 0x7c
92#define AFI_MSI_VEC5 0x80
93#define AFI_MSI_VEC6 0x84
94#define AFI_MSI_VEC7 0x88
95
96#define AFI_MSI_EN_VEC0 0x8c
97#define AFI_MSI_EN_VEC1 0x90
98#define AFI_MSI_EN_VEC2 0x94
99#define AFI_MSI_EN_VEC3 0x98
100#define AFI_MSI_EN_VEC4 0x9c
101#define AFI_MSI_EN_VEC5 0xa0
102#define AFI_MSI_EN_VEC6 0xa4
103#define AFI_MSI_EN_VEC7 0xa8
104
105#define AFI_CONFIGURATION 0xac
106#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
107
108#define AFI_FPCI_ERROR_MASKS 0xb0
109
110#define AFI_INTR_MASK 0xb4
111#define AFI_INTR_MASK_INT_MASK (1 << 0)
112#define AFI_INTR_MASK_MSI_MASK (1 << 8)
113
114#define AFI_INTR_CODE 0xb8
115#define AFI_INTR_CODE_MASK 0xf
116#define AFI_INTR_AXI_SLAVE_ERROR 1
117#define AFI_INTR_AXI_DECODE_ERROR 2
118#define AFI_INTR_TARGET_ABORT 3
119#define AFI_INTR_MASTER_ABORT 4
120#define AFI_INTR_INVALID_WRITE 5
121#define AFI_INTR_LEGACY 6
122#define AFI_INTR_FPCI_DECODE_ERROR 7
123
124#define AFI_INTR_SIGNATURE 0xbc
125#define AFI_UPPER_FPCI_ADDRESS 0xc0
126#define AFI_SM_INTR_ENABLE 0xc4
127#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
128#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
129#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
130#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
131#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
132#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
133#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
134#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
135
136#define AFI_AFI_INTR_ENABLE 0xc8
137#define AFI_INTR_EN_INI_SLVERR (1 << 0)
138#define AFI_INTR_EN_INI_DECERR (1 << 1)
139#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
140#define AFI_INTR_EN_TGT_DECERR (1 << 3)
141#define AFI_INTR_EN_TGT_WRERR (1 << 4)
142#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
143#define AFI_INTR_EN_AXI_DECERR (1 << 6)
144#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
145#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
146
147#define AFI_PCIE_CONFIG 0x0f8
148#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
149#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
150#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
151#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
152#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
153#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
154#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
155#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
156
157#define AFI_FUSE 0x104
158#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
159
160#define AFI_PEX0_CTRL 0x110
161#define AFI_PEX1_CTRL 0x118
162#define AFI_PEX2_CTRL 0x128
163#define AFI_PEX_CTRL_RST (1 << 0)
164#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
165#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
166
167#define AFI_PEXBIAS_CTRL_0 0x168
168
169#define RP_VEND_XP 0x00000F00
170#define RP_VEND_XP_DL_UP (1 << 30)
171
172#define RP_LINK_CONTROL_STATUS 0x00000090
173#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
174#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
175
176#define PADS_CTL_SEL 0x0000009C
177
178#define PADS_CTL 0x000000A0
179#define PADS_CTL_IDDQ_1L (1 << 0)
180#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
181#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
182
183#define PADS_PLL_CTL_TEGRA20 0x000000B8
184#define PADS_PLL_CTL_TEGRA30 0x000000B4
185#define PADS_PLL_CTL_RST_B4SM (1 << 1)
186#define PADS_PLL_CTL_LOCKDET (1 << 8)
187#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
188#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
189#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
190#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
191#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
192#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
193#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
194#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
195
196#define PADS_REFCLK_CFG0 0x000000C8
197#define PADS_REFCLK_CFG1 0x000000CC
198
199/*
200 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
201 * entries, one entry per PCIe port. These field definitions and desired
202 * values aren't in the TRM, but do come from NVIDIA.
203 */
204#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
205#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
206#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
207#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
208
209/* Default value provided by HW engineering is 0xfa5c */
210#define PADS_REFCLK_CFG_VALUE \
211 ( \
212 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
213 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
214 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
215 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
216 )
217
/*
 * Per-controller MSI state. Wraps the generic msi_chip and tracks which
 * of the INT_PCI_MSI_NR (8 registers x 32 bits) vectors are in use.
 */
struct tegra_msi {
	struct msi_chip chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocation map for MSI vectors */
	struct irq_domain *domain;
	unsigned long pages;		/* backing pages for the MSI target address -- TODO confirm against allocator */
	struct mutex lock;		/* presumably serializes vector allocation -- verify at use sites */
	int irq;			/* parent interrupt delivering all MSI vectors */
};
226
/*
 * Compile-time description of the differences between Tegra SoC
 * generations: register offsets, optional clocks/regulators and
 * feature flags consulted throughout the driver.
 */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;		/* PADS PLL control register offset (Tegra20 vs Tegra30) */
	u32 tx_ref_sel;			/* TX clock reference selection for the PADS PLL */
	bool has_pex_clkreq_en;		/* refclk can be gated by CLKREQ# */
	bool has_pex_bias_ctrl;		/* has the AFI_PEXBIAS_CTRL_0 register */
	bool has_intr_prsnt_sense;	/* supports presence-sense interrupts */
	bool has_avdd_supply;		/* requires an AVDD regulator */
	bool has_cml_clk;		/* requires the CML clock */
};
239
/* Convert a generic msi_chip pointer back to its containing tegra_msi. */
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
244
/*
 * Per-controller driver state: register mappings, address windows,
 * clocks, regulators and the list of enabled root ports.
 */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PADS (PHY) register block */
	void __iomem *afi;	/* AFI (AXI-to-FPCI bridge) register block */
	int irq;		/* legacy interrupt shared by all ports */

	struct list_head busses;	/* cached per-bus config mappings (tegra_pcie_bus) */
	struct resource *cs;		/* physical configuration space aperture */

	struct resource io;		/* downstream I/O window */
	struct resource mem;		/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;		/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pcie_xclk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only on SoCs with soc_data->has_cml_clk */

	struct tegra_msi msi;

	struct list_head ports;		/* tegra_pcie_port instances */
	unsigned int num_ports;
	u32 xbar_config;		/* lane crossbar setting programmed into AFI_PCIE_CONFIG */

	struct regulator *pex_clk_supply;
	struct regulator *vdd_supply;
	struct regulator *avdd_supply;	/* only on SoCs with soc_data->has_avdd_supply */

	const struct tegra_pcie_soc_data *soc_data;
};
278
/* One PCIe root port of the controller. */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct list_head list;		/* entry in tegra_pcie->ports */
	struct resource regs;		/* physical root port register window */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* port number; selects AFI_PEXn_CTRL */
	unsigned int lanes;		/* lane count assigned to this port */
};
287
/*
 * Cached virtual mapping of the configuration space for one bus; see
 * tegra_pcie_bus_alloc() for how the mapping is stitched together.
 */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* 1 MiB virtual area backing the mapping */
	struct list_head list;	/* entry in tegra_pcie->busses */
	unsigned int nr;	/* bus number this mapping covers */
};
293
/* Retrieve the driver state stashed in the ARM per-controller sysdata. */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
298
299static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
300 unsigned long offset)
301{
302 writel(value, pcie->afi + offset);
303}
304
305static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
306{
307 return readl(pcie->afi + offset);
308}
309
310static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
311 unsigned long offset)
312{
313 writel(value, pcie->pads + offset);
314}
315
316static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
317{
318 return readl(pcie->pads + offset);
319}
320
321/*
322 * The configuration space mapping on Tegra is somewhat similar to the ECAM
323 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
324 * register accesses are mapped:
325 *
326 * [27:24] extended register number
327 * [23:16] bus number
328 * [15:11] device number
329 * [10: 8] function number
330 * [ 7: 0] register number
331 *
332 * Mapping the whole extended configuration space would require 256 MiB of
333 * virtual address space, only a small part of which will actually be used.
 334 * To work around this, 1 MiB of virtual address space is allocated per
 335 * bus when the bus is first accessed. When the physical range is mapped,
 336 * the bus number bits are hidden so that the extended register number bits
337 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
338 *
339 * [19:16] extended register number
340 * [15:11] device number
341 * [10: 8] function number
342 * [ 7: 0] register number
343 *
344 * This is achieved by stitching together 16 chunks of 64 KiB of physical
345 * address space via the MMU.
346 */
/*
 * Compute the offset into a per-bus virtual mapping for the given
 * device/function and configuration register (see layout comment above).
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long ext = (where & 0xf00) << 8;	/* extended reg -> bits [19:16] */
	unsigned long dev = PCI_SLOT(devfn) << 11;	/* device -> bits [15:11] */
	unsigned long fn = PCI_FUNC(devfn) << 8;	/* function -> bits [10:8] */
	unsigned long reg = where & 0xfc;		/* dword-aligned register */

	return ext | dev | fn | reg;
}
352
/*
 * Create the 1 MiB virtual mapping of configuration space for bus @busnr
 * by stitching together 16 chunks of 64 KiB of physical address space
 * (see the layout description above tegra_pcie_conf_offset()).
 *
 * Returns the new tegra_pcie_bus or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* device-type, non-executable, shared mapping attributes */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/*
		 * Chunk i selects extended-register nibble i; the bus
		 * number picks the 64 KiB slice within each 1 MiB block
		 * of the physical aperture.
		 */
		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	vunmap(bus->area->addr);	/* also tears down partially created mappings */
free:
	kfree(bus);
	return ERR_PTR(err);
}
399
400/*
401 * Look up a virtual address mapping for the specified bus number. If no such
 402 * mapping exists, try to create one.
403 */
404static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
405 unsigned int busnr)
406{
407 struct tegra_pcie_bus *bus;
408
409 list_for_each_entry(bus, &pcie->busses, list)
410 if (bus->nr == busnr)
411 return bus->area->addr;
412
413 bus = tegra_pcie_bus_alloc(pcie, busnr);
414 if (IS_ERR(bus))
415 return NULL;
416
417 list_add_tail(&bus->list, &pcie->busses);
418
419 return bus->area->addr;
420}
421
422static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
423 unsigned int devfn,
424 int where)
425{
426 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
427 void __iomem *addr = NULL;
428
429 if (bus->number == 0) {
430 unsigned int slot = PCI_SLOT(devfn);
431 struct tegra_pcie_port *port;
432
433 list_for_each_entry(port, &pcie->ports, list) {
434 if (port->index + 1 == slot) {
435 addr = port->base + (where & ~3);
436 break;
437 }
438 }
439 } else {
440 addr = tegra_pcie_bus_map(pcie, bus->number);
441 if (!addr) {
442 dev_err(pcie->dev,
443 "failed to map cfg. space for bus %u\n",
444 bus->number);
445 return NULL;
446 }
447
448 addr += tegra_pcie_conf_offset(devfn, where);
449 }
450
451 return addr;
452}
453
454static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
455 int where, int size, u32 *value)
456{
457 void __iomem *addr;
458
459 addr = tegra_pcie_conf_address(bus, devfn, where);
460 if (!addr) {
461 *value = 0xffffffff;
462 return PCIBIOS_DEVICE_NOT_FOUND;
463 }
464
465 *value = readl(addr);
466
467 if (size == 1)
468 *value = (*value >> (8 * (where & 3))) & 0xff;
469 else if (size == 2)
470 *value = (*value >> (8 * (where & 3))) & 0xffff;
471
472 return PCIBIOS_SUCCESSFUL;
473}
474
475static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
476 int where, int size, u32 value)
477{
478 void __iomem *addr;
479 u32 mask, tmp;
480
481 addr = tegra_pcie_conf_address(bus, devfn, where);
482 if (!addr)
483 return PCIBIOS_DEVICE_NOT_FOUND;
484
485 if (size == 4) {
486 writel(value, addr);
487 return PCIBIOS_SUCCESSFUL;
488 }
489
490 if (size == 2)
491 mask = ~(0xffff << ((where & 0x3) * 8));
492 else if (size == 1)
493 mask = ~(0xff << ((where & 0x3) * 8));
494 else
495 return PCIBIOS_BAD_REGISTER_NUMBER;
496
497 tmp = readl(addr) & mask;
498 tmp |= value << ((where & 0x3) * 8);
499 writel(tmp, addr);
500
501 return PCIBIOS_SUCCESSFUL;
502}
503
/* Configuration space accessors for all busses behind this controller. */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
508
509static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
510{
511 unsigned long ret = 0;
512
513 switch (port->index) {
514 case 0:
515 ret = AFI_PEX0_CTRL;
516 break;
517
518 case 1:
519 ret = AFI_PEX1_CTRL;
520 break;
521
522 case 2:
523 ret = AFI_PEX2_CTRL;
524 break;
525 }
526
527 return ret;
528}
529
/*
 * Pulse the port's reset: assert (clear the bit), hold for 1-2 ms, then
 * deassert. AFI_PEX_CTRL_RST is active-low in this register (cf.
 * tegra_pcie_port_disable(), which clears it to assert reset).
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;		/* assert reset */
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);		/* hold for at least 1 ms */

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;		/* release reset */
	afi_writel(port->pcie, value, ctrl);
}
546
/*
 * Enable the port: turn on its reference clock (with CLKREQ# gating
 * where the SoC supports it) and pulse the port reset.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
564
/*
 * Disable the port: hold it in reset and gate its reference clock
 * (reverse order of tegra_pcie_port_enable()).
 */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset (the RST bit is active-low) */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
580
/*
 * Release everything acquired for @port and unlink it from the
 * controller's port list. The devm_* resources are released explicitly
 * rather than waiting for device teardown, since a port may be freed
 * while the device lives on.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
591
592static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
593{
594 u16 reg;
595
596 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
597 pci_read_config_word(dev, PCI_COMMAND, &reg);
598 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
599 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
600 pci_write_config_word(dev, PCI_COMMAND, reg);
601 }
602}
603DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
604
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the correct PCI-to-PCI bridge class code */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
614
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering enable bit in every device's DEVCTL */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
621
/*
 * ARM hw_pci ->setup() callback: publish the controller's memory,
 * prefetchable-memory and bus-number resources to the PCI core and map
 * the I/O window. Returns 1 so the core proceeds with this controller.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	/* map this controller's I/O space into the ARM I/O area */
	pci_ioremap_io(nr * SZ_64K, pcie->io.start);

	return 1;
}
635
/*
 * ARM hw_pci ->map_irq() callback: every INTx line funnels into the
 * controller's single legacy interrupt. Also notifies the Tegra cpuidle
 * driver that PCIe interrupts are in use.
 */
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);

	tegra_cpuidle_pcie_irqs_in_use();

	return pcie->irq;
}
644
645static void tegra_pcie_add_bus(struct pci_bus *bus)
646{
647 if (IS_ENABLED(CONFIG_PCI_MSI)) {
648 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
649
650 bus->msi = &pcie->msi.chip;
651 }
652}
653
654static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
655{
656 struct tegra_pcie *pcie = sys_to_pcie(sys);
657 struct pci_bus *bus;
658
659 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
660 &sys->resources);
661 if (!bus)
662 return NULL;
663
664 pci_scan_child_bus(bus);
665
666 return bus;
667}
668
669static irqreturn_t tegra_pcie_isr(int irq, void *arg)
670{
671 const char *err_msg[] = {
672 "Unknown",
673 "AXI slave error",
674 "AXI decode error",
675 "Target abort",
676 "Master abort",
677 "Invalid write",
678 "Response decoding error",
679 "AXI response decoding error",
680 "Transaction timeout",
681 };
682 struct tegra_pcie *pcie = arg;
683 u32 code, signature;
684
685 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
686 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
687 afi_writel(pcie, 0, AFI_INTR_CODE);
688
689 if (code == AFI_INTR_LEGACY)
690 return IRQ_NONE;
691
692 if (code >= ARRAY_SIZE(err_msg))
693 code = 0;
694
695 /*
696 * do not pollute kernel log with master abort reports since they
697 * happen a lot during enumeration
698 */
699 if (code == AFI_INTR_MASTER_ABORT)
700 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
701 signature);
702 else
703 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
704 signature);
705
706 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
707 code == AFI_INTR_FPCI_DECODE_ERROR) {
708 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
709 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
710
711 if (code == AFI_INTR_MASTER_ABORT)
712 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
713 else
714 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
715 }
716
717 return IRQ_HANDLED;
718}
719
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);	/* sizes in 4 KiB units */
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/*
	 * NOTE(review): AFI_MSI_BAR_SZ is cleared a second time here.
	 * Harmless, but possibly another register was intended — confirm
	 * against the TRM.
	 */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
785
/*
 * Bring up the controller: program the lane crossbar, enable the ports
 * listed in pcie->ports, initialize the PHY PLL, wait for PLL lock,
 * take the interface out of reset and enable AFI error interrupts.
 * The register write ordering follows the hardware bring-up sequence
 * and must not be rearranged.
 *
 * Returns 0 on success or -EBUSY if the PLL never locks.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* then re-enable only the ports we actually use */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* allow Gen2 on port 0 */
	value = afi_readl(pcie, AFI_FUSE);
	value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock (up to ~300-600 ms) */
	timeout = 300;
	do {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	tegra_periph_reset_deassert(pcie->pcie_xclk);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	/* enable AFI error interrupt sources */
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
886
/*
 * Power the controller down: hold all blocks in reset, gate the power
 * partition and disable the supplies enabled by tegra_pcie_power_on().
 * Regulator failures are only warned about since there is nothing
 * useful to do on the power-down path.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* TODO: disable and unprepare clocks? */

	tegra_periph_reset_assert(pcie->pcie_xclk);
	tegra_periph_reset_assert(pcie->afi_clk);
	tegra_periph_reset_assert(pcie->pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* disable supplies in reverse order of enabling */
	if (soc->has_avdd_supply) {
		err = regulator_disable(pcie->avdd_supply);
		if (err < 0)
			dev_warn(pcie->dev,
				 "failed to disable AVDD regulator: %d\n",
				 err);
	}

	err = regulator_disable(pcie->pex_clk_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
			 err);

	err = regulator_disable(pcie->vdd_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
			 err);
}
918
/*
 * Power the controller up: put all blocks in reset, gate the partition,
 * enable supplies, run the powergate power-up sequence (which also
 * enables pex_clk) and finally enable the AFI, CML and PLLE clocks.
 *
 * NOTE(review): the error paths return without disabling regulators or
 * clocks enabled earlier in this function — callers end up with an
 * unbalanced enable count on failure. Confirm and consider unwinding.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* start from a known state: everything in reset, partition off */
	tegra_periph_reset_assert(pcie->pcie_xclk);
	tegra_periph_reset_assert(pcie->afi_clk);
	tegra_periph_reset_assert(pcie->pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_enable(pcie->vdd_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
		return err;
	}

	err = regulator_enable(pcie->pex_clk_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
			err);
		return err;
	}

	if (soc->has_avdd_supply) {
		err = regulator_enable(pcie->avdd_supply);
		if (err < 0) {
			dev_err(pcie->dev,
				"failed to enable AVDD regulator: %d\n",
				err);
			return err;
		}
	}

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	tegra_periph_reset_deassert(pcie->afi_clk);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
986
987static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
988{
989 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
990
991 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
992 if (IS_ERR(pcie->pex_clk))
993 return PTR_ERR(pcie->pex_clk);
994
995 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
996 if (IS_ERR(pcie->afi_clk))
997 return PTR_ERR(pcie->afi_clk);
998
999 pcie->pcie_xclk = devm_clk_get(pcie->dev, "pcie_xclk");
1000 if (IS_ERR(pcie->pcie_xclk))
1001 return PTR_ERR(pcie->pcie_xclk);
1002
1003 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1004 if (IS_ERR(pcie->pll_e))
1005 return PTR_ERR(pcie->pll_e);
1006
1007 if (soc->has_cml_clk) {
1008 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1009 if (IS_ERR(pcie->cml_clk))
1010 return PTR_ERR(pcie->cml_clk);
1011 }
1012
1013 return 0;
1014}
1015
/*
 * tegra_pcie_get_resources() - acquire clocks, power, register windows, IRQ
 * @pcie: Tegra PCIe controller state
 *
 * Acquisition order: clocks -> power -> MMIO regions ("pads", "afi",
 * "cs") -> interrupt. Any failure after power-up unwinds through the
 * poweroff label. Note that a tegra_pcie_power_on() failure returns
 * directly; power_on is expected to leave nothing enabled in that case.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	/* request and remap controller registers */
	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	if (!pads) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	if (!afi) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* NOTE(review): devm_request_and_ioremap() is deprecated in later
	 * kernels in favor of devm_ioremap_resource() -- consider updating
	 * when the minimum kernel version allows.
	 */
	pcie->pads = devm_request_and_ioremap(&pdev->dev, pads);
	if (!pcie->pads) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->afi = devm_request_and_ioremap(&pdev->dev, afi);
	if (!pcie->afi) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request and remap configuration space */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* the configuration space is only reserved here, not mapped;
	 * per-bus mappings are created elsewhere on demand
	 */
	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1094
/*
 * tegra_pcie_put_resources() - release the IRQ and power down the controller
 * @pcie: Tegra PCIe controller state
 *
 * Counterpart to tegra_pcie_get_resources(). The IRQ must be freed before
 * power is removed. MMIO mappings and clocks are devres-managed and are
 * released automatically on driver detach.
 *
 * Return: always 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);
	return 0;
}
1103
1104static int tegra_msi_alloc(struct tegra_msi *chip)
1105{
1106 int msi;
1107
1108 mutex_lock(&chip->lock);
1109
1110 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1111 if (msi < INT_PCI_MSI_NR)
1112 set_bit(msi, chip->used);
1113 else
1114 msi = -ENOSPC;
1115
1116 mutex_unlock(&chip->lock);
1117
1118 return msi;
1119}
1120
1121static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1122{
1123 struct device *dev = chip->chip.dev;
1124
1125 mutex_lock(&chip->lock);
1126
1127 if (!test_bit(irq, chip->used))
1128 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1129 else
1130 clear_bit(irq, chip->used);
1131
1132 mutex_unlock(&chip->lock);
1133}
1134
/*
 * tegra_pcie_msi_irq() - top-level handler for the controller's MSI IRQ
 * @irq: Linux IRQ number (unused)
 * @data: the struct tegra_pcie passed to request_irq()
 *
 * The AFI exposes 8 consecutive 32-bit pending-vector registers
 * (AFI_MSI_VEC0 + i * 4), covering 8 * 32 = 256 MSI vectors. Each set
 * bit is acknowledged by writing it back before the corresponding Linux
 * IRQ is dispatched, then the register is re-read to pick up vectors
 * that fired while handling.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				/* only dispatch vectors that are allocated */
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	/* claim the IRQ only if at least one vector was serviced */
	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1175
1176static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1177 struct msi_desc *desc)
1178{
1179 struct tegra_msi *msi = to_tegra_msi(chip);
1180 struct msi_msg msg;
1181 unsigned int irq;
1182 int hwirq;
1183
1184 hwirq = tegra_msi_alloc(msi);
1185 if (hwirq < 0)
1186 return hwirq;
1187
1188 irq = irq_create_mapping(msi->domain, hwirq);
1189 if (!irq)
1190 return -EINVAL;
1191
1192 irq_set_msi_desc(irq, desc);
1193
1194 msg.address_lo = virt_to_phys((void *)msi->pages);
1195 /* 32 bit address only */
1196 msg.address_hi = 0;
1197 msg.data = hwirq;
1198
1199 write_msi_msg(irq, &msg);
1200
1201 return 0;
1202}
1203
1204static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1205{
1206 struct tegra_msi *msi = to_tegra_msi(chip);
1207 struct irq_data *d = irq_get_irq_data(irq);
1208
1209 tegra_msi_free(msi, d->hwirq);
1210}
1211
/*
 * irq_chip for the per-vector MSI interrupts; masking is delegated to
 * the generic PCI MSI mask/unmask helpers, which operate on the device's
 * MSI capability rather than on AFI registers.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1219
/*
 * tegra_msi_map() - irq_domain .map callback for MSI vectors
 * @domain: the MSI IRQ domain
 * @irq: Linux IRQ number being mapped
 * @hwirq: hardware vector number
 *
 * Installs the MSI irq_chip with the simple flow handler and stores the
 * domain's host data (the struct tegra_msi) as chip data. Also notifies
 * the cpuidle code that PCIe interrupts are in use -- presumably so deep
 * idle states that would lose them can be avoided (see the "disable LP2
 * cpuidle state if PCIe is enabled" change); confirm against the
 * tegra_cpuidle implementation.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1231
/* linear IRQ domain ops for the 256-vector MSI space */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1235
1236static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1237{
1238 struct platform_device *pdev = to_platform_device(pcie->dev);
1239 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1240 struct tegra_msi *msi = &pcie->msi;
1241 unsigned long base;
1242 int err;
1243 u32 reg;
1244
1245 mutex_init(&msi->lock);
1246
1247 msi->chip.dev = pcie->dev;
1248 msi->chip.setup_irq = tegra_msi_setup_irq;
1249 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1250
1251 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1252 &msi_domain_ops, &msi->chip);
1253 if (!msi->domain) {
1254 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1255 return -ENOMEM;
1256 }
1257
1258 err = platform_get_irq_byname(pdev, "msi");
1259 if (err < 0) {
1260 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1261 goto err;
1262 }
1263
1264 msi->irq = err;
1265
1266 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1267 tegra_msi_irq_chip.name, pcie);
1268 if (err < 0) {
1269 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1270 goto err;
1271 }
1272
1273 /* setup AFI/FPCI range */
1274 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1275 base = virt_to_phys((void *)msi->pages);
1276
1277 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1278 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1279 /* this register is in 4K increments */
1280 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1281
1282 /* enable all MSI vectors */
1283 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1284 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1285 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1286 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1287 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1288 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1289 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1290 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1291
1292 /* and unmask the MSI interrupt */
1293 reg = afi_readl(pcie, AFI_INTR_MASK);
1294 reg |= AFI_INTR_MASK_MSI_MASK;
1295 afi_writel(pcie, reg, AFI_INTR_MASK);
1296
1297 return 0;
1298
1299err:
1300 irq_domain_remove(msi->domain);
1301 return err;
1302}
1303
/*
 * tegra_pcie_disable_msi() - tear down MSI support
 * @pcie: Tegra PCIe controller state
 *
 * Reverse of tegra_pcie_enable_msi(): masks the summary MSI interrupt,
 * disables all 256 vectors, frees the MSI target page and the IRQ, then
 * disposes of every domain mapping before removing the domain itself.
 *
 * Return: always 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	/* safe to free now: the hardware no longer targets this page */
	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of any vector mappings created by tegra_msi_setup_irq() */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1340
/*
 * tegra_pcie_get_xbar_config() - translate lane counts into an XBAR setting
 * @pcie: Tegra PCIe controller state
 * @lanes: packed lane configuration, one byte per root port
 *         (port N's lane count in bits N*8..N*8+7, as assembled by
 *         tegra_pcie_parse_dt())
 * @xbar: on success, receives the AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_*
 *        value to program
 *
 * Only the lane combinations supported by the respective SoC are
 * accepted; anything else is rejected with -EINVAL.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device_node *np = pcie->dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			/* port0: 4 lanes, port1: 0, port2: 2 lanes */
			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			/* 2 lanes on each of the three ports */
			dev_info(pcie->dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			/* port0: 4 lanes, ports 1 and 2: 1 lane each */
			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			/* all 4 lanes on port 0 */
			dev_info(pcie->dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			/* 2 lanes on each of the two ports */
			dev_info(pcie->dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1379
/*
 * tegra_pcie_parse_dt() - extract controller configuration from the DT
 * @pcie: Tegra PCIe controller state
 *
 * Parses the controller node: the "ranges" property (I/O, prefetchable
 * and non-prefetchable memory apertures), the supply regulators, the
 * optional "bus-range", and one child node per root port. The packed
 * per-port lane counts are finally validated via
 * tegra_pcie_get_xbar_config().
 *
 * Return: 0 on success or a negative error code on failure. All
 * allocations are devres-managed, so early returns do not leak.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	u32 lanes = 0;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
	if (IS_ERR(pcie->vdd_supply))
		return PTR_ERR(pcie->vdd_supply);

	pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
	if (IS_ERR(pcie->pex_clk_supply))
		return PTR_ERR(pcie->pex_clk_supply);

	/* the AVDD supply only exists on some SoC generations */
	if (soc->has_avdd_supply) {
		pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
		if (IS_ERR(pcie->avdd_supply))
			return PTR_ERR(pcie->avdd_supply);
	}

	/* sort the "ranges" entries into the three aperture resources */
	for_each_of_pci_range(&parser, &range) {
		of_pci_range_to_resource(&range, np, &res);

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->io, &res, sizeof(res));
			pcie->io.name = "I/O";
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "PREFETCH";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "MEM";
			}
			break;
		}
	}

	/* a missing "bus-range" is not fatal: fall back to buses 0-255 */
	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		/* on success, err holds the port's devfn encoding */
		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		/* root ports are numbered from 1 in the DT */
		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* pack this port's lane count into its own byte */
		lanes |= value << (index << 3);

		/* disabled ports still contribute to the lane map above */
		if (!of_device_is_available(port))
			continue;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_request_and_ioremap(pcie->dev, &rp->regs);
		if (!rp->base)
			return -EADDRNOTAVAIL;

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	return 0;
}
1510
/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200	/* up to 1.2 seconds */
/*
 * tegra_pcie_port_check_link() - wait for a root port's link to come up
 * @port: root port to probe
 *
 * Polls in two stages: first for the data link layer to report up
 * (RP_VEND_XP_DL_UP), then for the link to become active
 * (RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE). Each stage polls up to
 * TEGRA_PCIE_LINKUP_TIMEOUT iterations with a 1-2 ms sleep per
 * iteration. On timeout the port is reset and the whole sequence is
 * retried, up to 3 attempts in total.
 *
 * Return: true if the link came up, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* stage 1: wait for the data link layer to come up */
		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* stage 2: wait for the link to become active */
		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
1557
/*
 * tegra_pcie_enable() - bring up root ports and register the PCI host
 * @pcie: Tegra PCIe controller state
 *
 * Enables each root port and checks its link; ports whose link stays
 * down are disabled and removed from the port list (hence the _safe
 * iteration). Afterwards the ARM PCI bios layer is initialized with the
 * driver's hw_pci callbacks, which triggers bus enumeration.
 *
 * Return: always 0.
 */
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		/* ports with a working link stay on the list */
		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.add_bus = tegra_pcie_add_bus;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}
1592
/* Tegra20: two root ports, no AVDD supply or CML clock */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_avdd_supply = false,
	.has_cml_clk = false,
};
1604
/* Tegra30: three root ports, extra AVDD supply and CML clock required */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_avdd_supply = true,
	.has_cml_clk = true,
};
1616
/*
 * Match table; the Tegra30 entry is listed first so that a node
 * claiming compatibility with both generations binds to the newer one.
 */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1623
/*
 * tegra_pcie_probe() - platform driver probe
 * @pdev: the "tegra-pcie" platform device
 *
 * Init sequence: parse DT -> acquire clocks/power/MMIO/IRQ -> program
 * the controller and address translations -> optional MSI setup ->
 * bring up root ports and enumerate. Failures unwind through the
 * disable_msi/put_resources labels; earlier allocations are devres-
 * managed and need no explicit cleanup.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->busses);
	INIT_LIST_HEAD(&pcie->ports);
	/* match->data carries the per-SoC feature flags */
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	/* allow PCI memory allocations to start at address 0 */
	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
1688
1689static struct platform_driver tegra_pcie_driver = {
1690 .driver = {
1691 .name = "tegra-pcie",
1692 .owner = THIS_MODULE,
1693 .of_match_table = tegra_pcie_of_match,
1694 .suppress_bind_attrs = true,
1695 },
1696 .probe = tegra_pcie_probe,
1697};
1698module_platform_driver(tegra_pcie_driver);
1699
1700MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
1701MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
1702MODULE_LICENSE("GPLv2");