author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 15:03:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 15:03:49 -0400
commit		80213c03c4151d900cf293ef0fc51f8d88495e14 (patch)
tree		af2422fa255aed96c23cef894e0adbf817f30c45 /drivers/pci/host
parent		ea584595fc85e65796335033dfca25ed655cd0ed (diff)
parent		f92d9ee3ab39841d1f29f2d1aa96ff7c74b36ee1 (diff)
Merge tag 'pci-v3.18-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:
 "The interesting things here are:

   - Turn on Config Request Retry Status Software Visibility.  This
     caused hangs last time, but we included a fix this time.
   - Rework PCI device configuration to use _HPP/_HPX more aggressively
   - Allow PCI devices to be put into D3cold during system suspend
   - Add arm64 PCI support
   - Add APM X-Gene host bridge driver
   - Add TI Keystone host bridge driver
   - Add Xilinx AXI host bridge driver

  More detailed summary:

  Enumeration
    - Check Vendor ID only for Config Request Retry Status (Rajat Jain)
    - Enable Config Request Retry Status when supported (Rajat Jain)
    - Add generic domain handling (Catalin Marinas)
    - Generate uppercase hex for modalias interface class (Ricardo Ribalda Delgado)

  Resource management
    - Add missing MEM_64 mask in pci_assign_unassigned_bridge_resources() (Yinghai Lu)
    - Increase IBM ipr SAS Crocodile BARs to at least system page size (Douglas Lehr)

  PCI device hotplug
    - Prevent NULL dereference during pciehp probe (Andreas Noever)
    - Move _HPP & _HPX handling into core (Bjorn Helgaas)
    - Apply _HPP to PCIe devices as well as PCI (Bjorn Helgaas)
    - Apply _HPP/_HPX to display devices (Bjorn Helgaas)
    - Preserve SERR & PARITY settings when applying _HPP/_HPX (Bjorn Helgaas)
    - Preserve MPS and MRRS settings when applying _HPP/_HPX (Bjorn Helgaas)
    - Apply _HPP/_HPX to all devices, not just hot-added ones (Bjorn Helgaas)
    - Fix wait time in pciehp timeout message (Yinghai Lu)
    - Add more pciehp Slot Control debug output (Yinghai Lu)
    - Stop disabling pciehp notifications during init (Yinghai Lu)

  MSI
    - Remove arch_msi_check_device() (Alexander Gordeev)
    - Rename pci_msi_check_device() to pci_msi_supported() (Alexander Gordeev)
    - Move D0 check into pci_msi_check_device() (Alexander Gordeev)
    - Remove unused kobject from struct msi_desc (Yijing Wang)
    - Remove "pos" from the struct msi_desc msi_attrib (Yijing Wang)
    - Add "msi_bus" sysfs MSI/MSI-X control for endpoints (Yijing Wang)
    - Use __get_cached_msi_msg() instead of get_cached_msi_msg() (Yijing Wang)
    - Use __read_msi_msg() instead of read_msi_msg() (Yijing Wang)
    - Use __write_msi_msg() instead of write_msi_msg() (Yijing Wang)

  Power management
    - Drop unused runtime PM support code for PCIe ports (Rafael J. Wysocki)
    - Allow PCI devices to be put into D3cold during system suspend (Rafael J. Wysocki)

  AER
    - Add additional AER error strings (Gong Chen)
    - Make <linux/aer.h> standalone includable (Thierry Reding)

  Virtualization
    - Add ACS quirk for Solarflare SFC9120 & SFC9140 (Alex Williamson)
    - Add ACS quirk for Intel 10G NICs (Alex Williamson)
    - Add ACS quirk for AMD A88X southbridge (Marti Raudsepp)
    - Remove unused pci_find_upstream_pcie_bridge(), pci_get_dma_source() (Alex Williamson)
    - Add device flag helpers (Ethan Zhao)
    - Assume all Mellanox devices have broken INTx masking (Gavin Shan)

  Generic host bridge driver
    - Fix ioport_map() for !CONFIG_GENERIC_IOMAP (Liviu Dudau)
    - Add pci_register_io_range() and pci_pio_to_address() (Liviu Dudau)
    - Define PCI_IOBASE as the base of virtual PCI IO space (Liviu Dudau)
    - Fix the conversion of IO ranges into IO resources (Liviu Dudau)
    - Add pci_get_new_domain_nr() and of_get_pci_domain_nr() (Liviu Dudau)
    - Add support for parsing PCI host bridge resources from DT (Liviu Dudau)
    - Add pci_remap_iospace() to map bus I/O resources (Liviu Dudau)
    - Add arm64 architectural support for PCI (Liviu Dudau)

  APM X-Gene
    - Add APM X-Gene PCIe driver (Tanmay Inamdar)
    - Add arm64 DT APM X-Gene PCIe device tree nodes (Tanmay Inamdar)

  Freescale i.MX6
    - Probe in module_init(), not fs_initcall() (Lucas Stach)
    - Delay enabling reference clock for SS until it stabilizes (Tim Harvey)

  Marvell MVEBU
    - Fix uninitialized variable in mvebu_get_tgt_attr() (Thomas Petazzoni)

  NVIDIA Tegra
    - Make sure the PCIe PLL is really reset (Eric Yuen)
    - Add error path tegra_msi_teardown_irq() cleanup (Jisheng Zhang)
    - Fix extended configuration space mapping (Peter Daifuku)
    - Implement resource hierarchy (Thierry Reding)
    - Clear CLKREQ# enable on port disable (Thierry Reding)
    - Add Tegra124 support (Thierry Reding)

  ST Microelectronics SPEAr13xx
    - Pass config resource through reg property (Pratyush Anand)

  Synopsys DesignWare
    - Use NULL instead of false (Fabio Estevam)
    - Parse bus-range property from devicetree (Lucas Stach)
    - Use pci_create_root_bus() instead of pci_scan_root_bus() (Lucas Stach)
    - Remove pci_assign_unassigned_resources() (Lucas Stach)
    - Check private_data validity in single place (Lucas Stach)
    - Setup and clear exactly one MSI at a time (Lucas Stach)
    - Remove open-coded bitmap operations (Lucas Stach)
    - Fix configuration base address when using 'reg' (Minghuan Lian)
    - Fix IO resource end address calculation (Minghuan Lian)
    - Rename get_msi_data() to get_msi_addr() (Minghuan Lian)
    - Add get_msi_data() to pcie_host_ops (Minghuan Lian)
    - Add support for v3.65 hardware (Murali Karicheri)
    - Fold struct pcie_port_info into struct pcie_port (Pratyush Anand)

  TI Keystone
    - Add TI Keystone PCIe driver (Murali Karicheri)
    - Limit MRSS for all downstream devices (Murali Karicheri)
    - Assume controller is already in RC mode (Murali Karicheri)
    - Set device ID based on SoC to support multiple ports (Murali Karicheri)

  Xilinx AXI
    - Add Xilinx AXI PCIe driver (Srikanth Thokala)
    - Fix xilinx_pcie_assign_msi() return value test (Dan Carpenter)

  Miscellaneous
    - Clean up whitespace (Quentin Lambert)
    - Remove assignments from "if" conditions (Quentin Lambert)
    - Move PCI_VENDOR_ID_VMWARE to pci_ids.h (Francesco Ruggeri)
    - x86: Mark DMI tables as initialization data (Mathias Krause)
    - x86: Move __init annotation to the correct place (Mathias Krause)
    - x86: Mark constants of pci_mmcfg_nvidia_mcp55() as __initconst (Mathias Krause)
    - x86: Constify pci_mmcfg_probes[] array (Mathias Krause)
    - x86: Mark PCI BIOS initialization code as such (Mathias Krause)
    - Parenthesize PCI_DEVID and PCI_VPD_LRDT_ID parameters (Megan Kamiya)
    - Remove unnecessary variable in pci_add_dynid() (Tobias Klauser)"

* tag 'pci-v3.18-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (109 commits)
  arm64: dts: Add APM X-Gene PCIe device tree nodes
  PCI: Add ACS quirk for AMD A88X southbridge devices
  PCI: xgene: Add APM X-Gene PCIe driver
  PCI: designware: Remove open-coded bitmap operations
  PCI/MSI: Remove unnecessary temporary variable
  PCI/MSI: Use __write_msi_msg() instead of write_msi_msg()
  MSI/powerpc: Use __read_msi_msg() instead of read_msi_msg()
  PCI/MSI: Use __get_cached_msi_msg() instead of get_cached_msi_msg()
  PCI/MSI: Add "msi_bus" sysfs MSI/MSI-X control for endpoints
  PCI/MSI: Remove "pos" from the struct msi_desc msi_attrib
  PCI/MSI: Remove unused kobject from struct msi_desc
  PCI/MSI: Rename pci_msi_check_device() to pci_msi_supported()
  PCI/MSI: Move D0 check into pci_msi_check_device()
  PCI/MSI: Remove arch_msi_check_device()
  irqchip: armada-370-xp: Remove arch_msi_check_device()
  PCI/MSI/PPC: Remove arch_msi_check_device()
  arm64: Add architectural support for PCI
  PCI: Add pci_remap_iospace() to map bus I/O resources
  of/pci: Add support for parsing PCI host bridge resources from DT
  of/pci: Add pci_get_new_domain_nr() and of_get_pci_domain_nr()
  ...

Conflicts:
	arch/arm64/boot/dts/apm-storm.dtsi
Diffstat (limited to 'drivers/pci/host')
-rw-r--r--	drivers/pci/host/Kconfig	28
-rw-r--r--	drivers/pci/host/Makefile	3
-rw-r--r--	drivers/pci/host/pci-imx6.c	13
-rw-r--r--	drivers/pci/host/pci-keystone-dw.c	516
-rw-r--r--	drivers/pci/host/pci-keystone.c	415
-rw-r--r--	drivers/pci/host/pci-keystone.h	58
-rw-r--r--	drivers/pci/host/pci-mvebu.c	6
-rw-r--r--	drivers/pci/host/pci-tegra.c	277
-rw-r--r--	drivers/pci/host/pci-xgene.c	659
-rw-r--r--	drivers/pci/host/pcie-designware.c	268
-rw-r--r--	drivers/pci/host/pcie-designware.h	22
-rw-r--r--	drivers/pci/host/pcie-rcar.c	21
-rw-r--r--	drivers/pci/host/pcie-spear13xx.c	2
-rw-r--r--	drivers/pci/host/pcie-xilinx.c	970
14 files changed, 3023 insertions, 235 deletions
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 90f5ccacce4b..3dc25fad490c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -63,4 +63,32 @@ config PCIE_SPEAR13XX
 	help
 	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
 
+config PCI_KEYSTONE
+	bool "TI Keystone PCIe controller"
+	depends on ARCH_KEYSTONE
+	select PCIE_DW
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCI controller support on Keystone
+	  SoCs. The PCI controller on Keystone is based on DesignWare hardware
+	  and therefore the driver re-uses the DesignWare core functions to
+	  implement the driver.
+
+config PCIE_XILINX
+	bool "Xilinx AXI PCIe host bridge support"
+	depends on ARCH_ZYNQ
+	help
+	  Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+	  Host Bridge driver.
+
+config PCI_XGENE
+	bool "X-Gene PCIe controller"
+	depends on ARCH_XGENE
+	depends on OF
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want internal PCI support on the APM X-Gene SoC.
+	  There are 5 internal PCIe ports available. Each port is GEN3 capable
+	  and supports link widths from x1 to x8.
+
 endmenu
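
The three new options are all bool, so these host drivers can only be built into the kernel, not as modules. A minimal .config fragment for a Keystone build (a sketch: it assumes ARCH_KEYSTONE=y is already set, and the last two symbols are pulled in automatically by the select statements above):

	CONFIG_PCI=y
	CONFIG_PCI_KEYSTONE=y
	# selected automatically by PCI_KEYSTONE:
	CONFIG_PCIE_DW=y
	CONFIG_PCIEPORTBUS=y
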
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d0e88f114ff9..26b3461d68d7 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 35fc73a8d0b3..233fe8a88264 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -257,11 +257,6 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
 	int ret;
 
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
-
 	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 	if (ret) {
 		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
@@ -283,6 +278,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 	/* allow the clocks to stabilize */
 	usleep_range(200, 500);
 
+	/* power up core phy and enable ref clock */
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
 	/* Some boards don't have PCIe reset GPIO. */
 	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
 		gpio_set_value(imx6_pcie->reset_gpio, 0);
@@ -647,7 +648,7 @@ static int __init imx6_pcie_init(void)
 {
 	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
 }
-fs_initcall(imx6_pcie_init);
+module_init(imx6_pcie_init);
 
 MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
 MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
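
Both relocated calls go through regmap_update_bits(), a masked read-modify-write: the 0 << 18 value clears the PCIE_TEST_PD power-down bit and the 1 << 16 value sets the reference-clock enable. A standalone sketch of that update pattern (plain C, with an ordinary variable standing in for the GPR1 register; the bit positions follow the mask values in the patch):

	#include <stdio.h>

	/* sketch: the read-modify-write regmap_update_bits() performs */
	static unsigned int update_bits(unsigned int reg, unsigned int mask,
					unsigned int val)
	{
		reg &= ~mask;		/* clear the field selected by mask */
		reg |= val & mask;	/* merge in the new bits */
		return reg;
	}

	int main(void)
	{
		unsigned int gpr1 = 0x00050000;	/* hypothetical initial value */

		gpr1 = update_bits(gpr1, 1u << 18, 0u << 18);	/* TEST_PD off */
		gpr1 = update_bits(gpr1, 1u << 16, 1u << 16);	/* REF_CLK_EN on */
		printf("GPR1 = 0x%08x\n", gpr1);		/* 0x00010000 */
		return 0;
	}
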
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
new file mode 100644
index 000000000000..34086ce88e8e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -0,0 +1,516 @@
+/*
+ * Designware application register space functions for Keystone PCI controller
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+/* Application register defines */
+#define LTSSM_EN_VAL			1
+#define LTSSM_STATE_MASK		0x1f
+#define LTSSM_STATE_L0			0x11
+#define DBI_CS2_EN_VAL			0x20
+#define OB_XLAT_EN_VAL			2
+
+/* Application registers */
+#define CMD_STATUS			0x004
+#define CFG_SETUP			0x008
+#define OB_SIZE				0x030
+#define CFG_PCIM_WIN_SZ_IDX		3
+#define CFG_PCIM_WIN_CNT		32
+#define SPACE0_REMOTE_CFG_OFFSET	0x1000
+#define OB_OFFSET_INDEX(n)		(0x200 + (8 * n))
+#define OB_OFFSET_HI(n)			(0x204 + (8 * n))
+
+/* IRQ register defines */
+#define IRQ_EOI				0x050
+#define IRQ_STATUS			0x184
+#define IRQ_ENABLE_SET			0x188
+#define IRQ_ENABLE_CLR			0x18c
+
+#define MSI_IRQ				0x054
+#define MSI0_IRQ_STATUS			0x104
+#define MSI0_IRQ_ENABLE_SET		0x108
+#define MSI0_IRQ_ENABLE_CLR		0x10c
+#define IRQ_STATUS			0x184
+#define MSI_IRQ_OFFSET			4
+
+/* Config space registers */
+#define DEBUG0				0x728
+
+#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+					     u32 *bit_pos)
+{
+	*reg_offset = offset % 8;
+	*bit_pos = offset >> 3;
+}
+
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	return ks_pcie->app.start + MSI_IRQ;
+}
+
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 pending, vector;
+	int src, virq;
+
+	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+
+	/*
+	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+	 * shows 1, 9, 17, 25 and so forth
+	 */
+	for (src = 0; src < 4; src++) {
+		if (BIT(src) & pending) {
+			vector = offset + (src << 3);
+			virq = irq_linear_revmap(pp->irq_domain, vector);
+			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+				src, vector, virq);
+			generic_handle_irq(virq);
+		}
+	}
+}
+
+static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
+{
+	u32 offset, reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
+
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
+	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+	u32 reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+}
+
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+	u32 reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+}
+
+static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
+{
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	u32 offset;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Mask the end point if PVM implemented */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (msi->msi_attrib.maskbit)
+			mask_msi_irq(d);
+	}
+
+	ks_dw_pcie_msi_clear_irq(pp, offset);
+}
+
+static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
+{
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	u32 offset;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Mask the end point if PVM implemented */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (msi->msi_attrib.maskbit)
+			unmask_msi_irq(d);
+	}
+
+	ks_dw_pcie_msi_set_irq(pp, offset);
+}
+
+static struct irq_chip ks_dw_pcie_msi_irq_chip = {
+	.name = "Keystone-PCIe-MSI-IRQ",
+	.irq_ack = ks_dw_pcie_msi_irq_ack,
+	.irq_mask = ks_dw_pcie_msi_irq_mask,
+	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
+};
+
+static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			      irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
+	.map = ks_dw_pcie_msi_map,
+};
+
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	int i;
+
+	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
+					MAX_MSI_IRQS,
+					&ks_dw_pcie_msi_domain_ops,
+					chip);
+	if (!pp->irq_domain) {
+		dev_err(pp->dev, "irq domain init failed\n");
+		return -ENXIO;
+	}
+
+	for (i = 0; i < MAX_MSI_IRQS; i++)
+		irq_create_mapping(pp->irq_domain, i);
+
+	return 0;
+}
+
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+	int i;
+
+	for (i = 0; i < MAX_LEGACY_IRQS; i++)
+		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+}
+
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 pending;
+	int virq;
+
+	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+
+	if (BIT(0) & pending) {
+		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
+			virq);
+		generic_handle_irq(virq);
+	}
+
+	/* EOI the INTx interrupt */
+	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
+	.name = "Keystone-PCI-Legacy-IRQ",
+	.irq_ack = ks_dw_pcie_ack_legacy_irq,
+	.irq_mask = ks_dw_pcie_mask_legacy_irq,
+	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
+};
+
+static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
+				unsigned int irq, irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
+	.map = ks_dw_pcie_init_legacy_irq_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+{
+	u32 val;
+
+	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
+	       reg_virt + CMD_STATUS);
+
+	do {
+		val = readl(reg_virt + CMD_STATUS);
+	} while (!(val & DBI_CS2_EN_VAL));
+}
+
+/**
+ * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+{
+	u32 val;
+
+	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
+	       reg_virt + CMD_STATUS);
+
+	do {
+		val = readl(reg_virt + CMD_STATUS);
+	} while (val & DBI_CS2_EN_VAL);
+}
+
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 start = pp->mem.start, end = pp->mem.end;
+	int i, tr_size;
+
+	/* Disable BARs for inbound access */
+	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
+	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+	/* Set outbound translation size per window division */
+	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+
+	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
+
+	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
+	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
+		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
+		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+		start += tr_size;
+	}
+
+	/* Enable OB translation */
+	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
+	       ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_pcie_cfg_setup() - Set up configuration space address for a device
+ *
+ * @ks_pcie: ptr to keystone_pcie structure
+ * @bus: Bus number the device is residing on
+ * @devfn: device, function number info
+ *
+ * Forms and returns the address of configuration space mapped in PCIESS
+ * address space 0. Also configures CFG_SETUP for remote configuration space
+ * access.
+ *
+ * The address space has two regions to access configuration - local and remote.
+ * We access local region for bus 0 (as RC is attached on bus 0) and remote
+ * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
+ * we will do TYPE 0 access as it will be on our secondary bus (logical).
+ * CFG_SETUP is needed only for remote configuration access.
+ */
+static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
+				       unsigned int devfn)
+{
+	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 regval;
+
+	if (bus == 0)
+		return pp->dbi_base;
+
+	regval = (bus << 16) | (device << 8) | function;
+
+	/*
+	 * Since Bus#1 will be a virtual bus, we need to have TYPE0
+	 * access only.
+	 * TYPE 1
+	 */
+	if (bus != 1)
+		regval |= BIT(24);
+
+	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+	return pp->va_cfg0_base;
+}
+
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 *val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u8 bus_num = bus->number;
+	void __iomem *addr;
+
+	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+	return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+}
+
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u8 bus_num = bus->number;
+	void __iomem *addr;
+
+	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+	return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+}
+
+/**
+ * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	/* Configure and set up BAR0 */
+	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+
+	/* Enable BAR0 */
+	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+
+	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+	/*
+	 * For BAR0, just setting bus address for inbound writes (MSI) should
+	 * be sufficient. Use physical address to avoid any conflicts.
+	 */
+	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+}
+
+/**
+ * ks_dw_pcie_link_up() - Check if link up
+ */
+int ks_dw_pcie_link_up(struct pcie_port *pp)
+{
+	u32 val = readl(pp->dbi_base + DEBUG0);
+
+	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
+}
+
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	/* Disable Link training */
+	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	val &= ~LTSSM_EN_VAL;
+	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+
+	/* Initiate Link Training */
+	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize legacy irq domain
+ * and call dw_pcie_host_init() to initialize the Keystone
+ * PCI host controller.
+ */
+int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+				struct device_node *msi_intc_np)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	struct platform_device *pdev = to_platform_device(pp->dev);
+	struct resource *res;
+
+	/* Index 0 is the config reg. space address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
+
+	/*
+	 * These are set to the same value and are used by the
+	 * pcie rd/wr_other_conf functions.
+	 */
+	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+	pp->va_cfg1_base = pp->va_cfg0_base;
+
+	/* Index 1 is the application reg. space address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	ks_pcie->app = *res;
+	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+	if (IS_ERR(ks_pcie->va_app_base))
+		return PTR_ERR(ks_pcie->va_app_base);
+
+	/* Create legacy IRQ domain */
+	ks_pcie->legacy_irq_domain =
+			irq_domain_add_linear(ks_pcie->legacy_intc_np,
+					MAX_LEGACY_IRQS,
+					&ks_dw_pcie_legacy_irq_domain_ops,
+					NULL);
+	if (!ks_pcie->legacy_irq_domain) {
+		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+		return -EINVAL;
+	}
+
+	return dw_pcie_host_init(pp);
+}
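
A note on the MSI arithmetic in the file above: update_reg_offset_bit_pos() spreads the 32 MSI vectors across eight MSI_IRQ banks, with the bank index taken from vector % 8 and the bit inside the bank's 4-bit status field from vector / 8, matching the MSI0/MSI1 layout described in ks_dw_pcie_handle_msi_irq(). A standalone worked example of the mapping (a sketch, not kernel code):

	#include <stdio.h>

	/* sketch: mirrors update_reg_offset_bit_pos() for all 32 vectors */
	int main(void)
	{
		unsigned int vec;

		for (vec = 0; vec < 32; vec++) {
			unsigned int reg_offset = vec % 8;	/* MSIn bank */
			unsigned int bit_pos = vec >> 3;	/* bit in bank */

			printf("vector %2u -> MSI%u bit %u\n",
			       vec, reg_offset, bit_pos);
		}
		return 0;	/* e.g. vector 17 -> MSI1 bit 2 */
	}
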
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
new file mode 100644
index 000000000000..1b893bc8b842
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.c
@@ -0,0 +1,415 @@
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+#define DRIVER_NAME	"keystone-pcie"
+
+/* driver specific constants */
+#define MAX_MSI_HOST_IRQS	8
+#define MAX_LEGACY_HOST_IRQS	4
+
+/* DEV_STAT_CTRL */
+#define PCIE_CAP_BASE		0x70
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK		0xb008
+#define PCIE_RC_K2E		0xb009
+#define PCIE_RC_K2L		0xb00a
+
+#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)
+
+static void quirk_limit_mrrs(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct pci_dev *bridge = bus->self;
+	static const struct pci_device_id rc_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+
+	if (pci_is_root_bus(bus))
+		return;
+
+	/* look for the host bridge */
+	while (!pci_is_root_bus(bus)) {
+		bridge = bus->self;
+		bus = bus->parent;
+	}
+
+	if (bridge) {
+		/*
+		 * Keystone PCI controller has a h/w limitation of
+		 * 256 bytes maximum read request size. It can't handle
+		 * anything higher than this. So force this limit on
+		 * all downstream devices.
+		 */
+		if (pci_match_id(rc_pci_devids, bridge)) {
+			if (pcie_get_readrq(dev) > 256) {
+				dev_info(&dev->dev, "limiting MRRS to 256\n");
+				pcie_set_readrq(dev, 256);
+			}
+		}
+	}
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+
+static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	int count = 200;
+
+	dw_pcie_setup_rc(pp);
+
+	if (dw_pcie_link_up(pp)) {
+		dev_err(pp->dev, "Link already up\n");
+		return 0;
+	}
+
+	ks_dw_pcie_initiate_link_train(ks_pcie);
+	/* check if the link is up or not */
+	while (!dw_pcie_link_up(pp)) {
+		usleep_range(100, 1000);
+		if (--count) {
+			ks_dw_pcie_initiate_link_train(ks_pcie);
+			continue;
+		}
+		dev_err(pp->dev, "phy link never came up\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	u32 offset = irq - ks_pcie->msi_host_irqs[0];
+	struct pcie_port *pp = &ks_pcie->pp;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation would have replaced normal
+	 * interrupt driver handler so we need to take care of mask/unmask and
+	 * ack operation.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+	chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @irq: IRQ line for legacy interrupts
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * takes care of interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation would have replaced normal
+	 * interrupt driver handler so we need to take care of mask/unmask and
+	 * ack operation.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+	chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+					   char *controller, int *num_irqs)
+{
+	int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+	struct device *dev = ks_pcie->pp.dev;
+	struct device_node *np_pcie = dev->of_node, **np_temp;
+
+	if (!strcmp(controller, "msi-interrupt-controller"))
+		legacy = 0;
+
+	if (legacy) {
+		np_temp = &ks_pcie->legacy_intc_np;
+		max_host_irqs = MAX_LEGACY_HOST_IRQS;
+		host_irqs = &ks_pcie->legacy_host_irqs[0];
+	} else {
+		np_temp = &ks_pcie->msi_intc_np;
+		max_host_irqs = MAX_MSI_HOST_IRQS;
+		host_irqs = &ks_pcie->msi_host_irqs[0];
+	}
+
+	/* interrupt controller is in a child node */
+	*np_temp = of_find_node_by_name(np_pcie, controller);
+	if (!(*np_temp)) {
+		dev_err(dev, "Node for %s is absent\n", controller);
+		goto out;
+	}
+	temp = of_irq_count(*np_temp);
+	if (!temp)
+		goto out;
+	if (temp > max_host_irqs)
+		dev_warn(dev, "Too many %s interrupts defined %u\n",
+			(legacy ? "legacy" : "MSI"), temp);
+
+	/*
+	 * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
+	 * 7 (MSI)
+	 */
+	for (temp = 0; temp < max_host_irqs; temp++) {
+		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
+		if (host_irqs[temp] < 0)
+			break;
+	}
+	if (temp) {
+		*num_irqs = temp;
+		ret = 0;
+	}
+out:
+	return ret;
+}
+
+static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+{
+	int i;
+
+	/* Legacy IRQ */
+	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
+		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
+		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
+					ks_pcie_legacy_irq_handler);
+	}
+	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+
+	/* MSI IRQ */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
+			irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
+						ks_pcie_msi_irq_handler);
+			irq_set_handler_data(ks_pcie->msi_host_irqs[i],
+					     ks_pcie);
+		}
+	}
+}
+
+/*
+ * When a PCI device does not exist during config cycles, keystone host gets a
+ * bus error instead of returning 0xffffffff. This handler always returns 0
+ * for this kind of faults.
+ */
+static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
+				struct pt_regs *regs)
+{
+	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+	if ((instr & 0x0e100090) == 0x00100090) {
+		int reg = (instr >> 12) & 15;
+
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+	}
+
+	return 0;
+}
+
+static void __init ks_pcie_host_init(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u32 val;
+
+	ks_pcie_establish_link(ks_pcie);
+	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+	ks_pcie_setup_interrupts(ks_pcie);
+	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+			pp->dbi_base + PCI_IO_BASE);
+
+	/* update the Device ID */
+	writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);
+
+	/* update the DEV_STAT_CTRL to publish the right MRRS */
+	val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+	val &= ~PCI_EXP_DEVCTL_READRQ;
+	/* set the MRRS to 256 bytes */
+	val |= BIT(12);
+	writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+
+	/*
+	 * PCIe access errors that result into OCP errors are caught by ARM as
+	 * "External aborts"
+	 */
+	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+			"Asynchronous external abort");
+}
+
+static struct pcie_host_ops keystone_pcie_host_ops = {
+	.rd_other_conf = ks_dw_pcie_rd_other_conf,
+	.wr_other_conf = ks_dw_pcie_wr_other_conf,
+	.link_up = ks_dw_pcie_link_up,
+	.host_init = ks_pcie_host_init,
+	.msi_set_irq = ks_dw_pcie_msi_set_irq,
+	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
+	.get_msi_addr = ks_dw_pcie_get_msi_addr,
+	.msi_host_init = ks_dw_pcie_msi_host_init,
+	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
+};
+
+static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
+			 struct platform_device *pdev)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	int ret;
+
+	ret = ks_pcie_get_irq_controller_info(ks_pcie,
+					"legacy-interrupt-controller",
+					&ks_pcie->num_legacy_host_irqs);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		ret = ks_pcie_get_irq_controller_info(ks_pcie,
+						"msi-interrupt-controller",
+						&ks_pcie->num_msi_host_irqs);
+		if (ret)
+			return ret;
+	}
+
+	pp->root_bus_nr = -1;
+	pp->ops = &keystone_pcie_host_ops;
+	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id ks_pcie_of_match[] = {
+	{
+		.type = "pci",
+		.compatible = "ti,keystone-pcie",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
+
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(ks_pcie->clk);
+
+	return 0;
+}
+
+static int __init ks_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct keystone_pcie *ks_pcie;
+	struct pcie_port *pp;
+	struct resource *res;
+	void __iomem *reg_p;
+	struct phy *phy;
+	int ret = 0;
+
+	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
+				GFP_KERNEL);
+	if (!ks_pcie) {
+		dev_err(dev, "no memory for keystone pcie\n");
+		return -ENOMEM;
+	}
+	pp = &ks_pcie->pp;
+
+	/* initialize SerDes Phy if present */
+	phy = devm_phy_get(dev, "pcie-phy");
+	if (!IS_ERR_OR_NULL(phy)) {
+		ret = phy_init(phy);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* index 2 is to read PCI DEVICE_ID */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	reg_p = devm_ioremap_resource(dev, res);
+	if (IS_ERR(reg_p))
+		return PTR_ERR(reg_p);
+	ks_pcie->device_id = readl(reg_p) >> 16;
+	devm_iounmap(dev, reg_p);
+	devm_release_mem_region(dev, res->start, resource_size(res));
+
+	pp->dev = dev;
+	platform_set_drvdata(pdev, ks_pcie);
+	ks_pcie->clk = devm_clk_get(dev, "pcie");
+	if (IS_ERR(ks_pcie->clk)) {
+		dev_err(dev, "Failed to get pcie rc clock\n");
+		return PTR_ERR(ks_pcie->clk);
+	}
+	ret = clk_prepare_enable(ks_pcie->clk);
+	if (ret)
+		return ret;
+
+	ret = ks_add_pcie_port(ks_pcie, pdev);
+	if (ret < 0)
+		goto fail_clk;
+
+	return 0;
+fail_clk:
+	clk_disable_unprepare(ks_pcie->clk);
+
+	return ret;
+}
+
+static struct platform_driver ks_pcie_driver __refdata = {
+	.probe  = ks_pcie_probe,
+	.remove = __exit_p(ks_pcie_remove),
+	.driver = {
+		.name	= "keystone-pcie",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(ks_pcie_of_match),
+	},
+};
+
+module_platform_driver(ks_pcie_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_DESCRIPTION("Keystone PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
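
The fault hook above works by decoding the aborted instruction: the test (instr & 0x0e100090) == 0x00100090 matches the ARM "extra load" encodings (ldrh and friends) with the load bit set, and bits 15:12 of the opcode name the destination register, which is forced to -1 (the 0xffffffff a PCI bus returns for an absent device) before the PC is advanced past the 4-byte instruction. A userspace sketch of the same decode (the sample opcode is only an illustration):

	#include <stdio.h>
	#include <stdint.h>

	/* sketch: the load-instruction test used by keystone_pcie_fault() */
	static int decode_arm_load(uint32_t instr, unsigned int *rt)
	{
		if ((instr & 0x0e100090) == 0x00100090) {
			*rt = (instr >> 12) & 15;	/* bits 15:12 = Rt */
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		uint32_t opcode = 0xe1d330b0;	/* ldrh r3, [r3] */
		unsigned int rt;

		if (decode_arm_load(opcode, &rt))
			printf("load into r%u; handler fakes ~0 there\n", rt);
		return 0;
	}
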
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
new file mode 100644
index 000000000000..1fc1fceede9e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.h
@@ -0,0 +1,58 @@
+/*
+ * Keystone PCI Controller's common includes
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define MAX_LEGACY_IRQS			4
+#define MAX_MSI_HOST_IRQS		8
+#define MAX_LEGACY_HOST_IRQS		4
+
+struct keystone_pcie {
+	struct clk		*clk;
+	struct pcie_port	pp;
+	/* PCI Device ID */
+	u32			device_id;
+	int			num_legacy_host_irqs;
+	int			legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+	struct device_node	*legacy_intc_np;
+
+	int			num_msi_host_irqs;
+	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
+	struct device_node	*msi_intc_np;
+	struct irq_domain	*legacy_irq_domain;
+
+	/* Application register space */
+	void __iomem		*va_app_base;
+	struct resource		app;
+};
+
+/* Keystone DW specific MSI controller APIs/definitions */
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+
+/* Keystone specific PCI controller APIs */
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+int  ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+			struct device_node *msi_intc_np);
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+		unsigned int devfn, int where, int size, u32 val);
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+		unsigned int devfn, int where, int size, u32 *val);
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
+int  ks_dw_pcie_link_up(struct pcie_port *pp);
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
+int  ks_dw_pcie_msi_host_init(struct pcie_port *pp,
+			struct msi_chip *chip);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index a8c6f1a92e0f..b1315e197ffb 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -873,7 +873,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
 		rangesz = pna + na + ns;
 		nranges = rlen / sizeof(__be32) / rangesz;
 
-		for (i = 0; i < nranges; i++) {
+		for (i = 0; i < nranges; i++, range += rangesz) {
 			u32 flags = of_read_number(range, 1);
 			u32 slot = of_read_number(range + 1, 1);
 			u64 cpuaddr = of_read_number(range + na, pna);
@@ -883,14 +883,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
 				rtype = IORESOURCE_IO;
 			else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
 				rtype = IORESOURCE_MEM;
+			else
+				continue;
 
 			if (slot == PCI_SLOT(devfn) && type == rtype) {
 				*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
 				*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
 				return 0;
 			}
-
-			range += rangesz;
 		}
 
 		return -ENOENT;
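
The fix here is about where the cursor advance lives: previously range += rangesz sat at the bottom of the loop body, so the newly added continue would have skipped it and left the parser stuck on a stale entry; as part of the for-statement increment it now runs on every path, continue included. A minimal standalone illustration of the pattern (hypothetical flag/value pairs, not the actual DT parsing):

	#include <stdio.h>

	int main(void)
	{
		int ranges[] = { 1, 10, 2, 20, 1, 30 };	/* (flags, value) pairs */
		int nranges = 3, rangesz = 2;
		int *range = ranges;
		int i;

		for (i = 0; i < nranges; i++, range += rangesz) {
			if (range[0] != 1)
				continue;	/* cursor still advances */
			printf("entry %d matches: value %d\n", i, range[1]);
		}
		return 0;	/* prints values 10 and 30 */
	}
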
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 0fb0fdb223d5..3d43874319be 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -38,6 +38,7 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/sizes.h>
@@ -115,13 +116,20 @@
 
 #define AFI_INTR_CODE			0xb8
 #define  AFI_INTR_CODE_MASK		0xf
-#define  AFI_INTR_AXI_SLAVE_ERROR	1
-#define  AFI_INTR_AXI_DECODE_ERROR	2
+#define  AFI_INTR_INI_SLAVE_ERROR	1
+#define  AFI_INTR_INI_DECODE_ERROR	2
 #define  AFI_INTR_TARGET_ABORT		3
 #define  AFI_INTR_MASTER_ABORT		4
 #define  AFI_INTR_INVALID_WRITE	5
 #define  AFI_INTR_LEGACY		6
 #define  AFI_INTR_FPCI_DECODE_ERROR	7
+#define  AFI_INTR_AXI_DECODE_ERROR	8
+#define  AFI_INTR_FPCI_TIMEOUT		9
+#define  AFI_INTR_PE_PRSNT_SENSE	10
+#define  AFI_INTR_PE_CLKREQ_SENSE	11
+#define  AFI_INTR_CLKCLAMP_SENSE	12
+#define  AFI_INTR_RDY4PD_SENSE		13
+#define  AFI_INTR_P2P_ERROR		14
 
 #define AFI_INTR_SIGNATURE	0xbc
 #define AFI_UPPER_FPCI_ADDRESS	0xc0
@@ -152,8 +160,10 @@
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
 
 #define AFI_FUSE			0x104
@@ -165,12 +175,21 @@
 #define  AFI_PEX_CTRL_RST		(1 << 0)
 #define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
 #define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
+#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
+
+#define AFI_PLLE_CONTROL		0x160
+#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 
 #define AFI_PEXBIAS_CTRL_0		0x168
 
 #define RP_VEND_XP	0x00000F00
 #define  RP_VEND_XP_DL_UP	(1 << 30)
 
+#define RP_PRIV_MISC	0x00000FE0
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+
 #define RP_LINK_CONTROL_STATUS			0x00000090
 #define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
 #define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
@@ -197,6 +216,7 @@
 
 #define PADS_REFCLK_CFG0			0x000000C8
 #define PADS_REFCLK_CFG1			0x000000CC
+#define PADS_REFCLK_BIAS			0x000000D0
 
 /*
  * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -236,6 +256,7 @@ struct tegra_pcie_soc_data {
 	bool has_pex_bias_ctrl;
 	bool has_intr_prsnt_sense;
 	bool has_cml_clk;
+	bool has_gen2;
 };
 
 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
@@ -253,6 +274,7 @@ struct tegra_pcie {
 	struct list_head buses;
 	struct resource *cs;
 
+	struct resource all;
 	struct resource io;
 	struct resource mem;
 	struct resource prefetch;
@@ -267,6 +289,8 @@ struct tegra_pcie {
 	struct reset_control *afi_rst;
 	struct reset_control *pcie_xrst;
 
+	struct phy *phy;
+
 	struct tegra_msi msi;
 
 	struct list_head ports;
@@ -382,7 +406,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 	for (i = 0; i < 16; i++) {
 		unsigned long virt = (unsigned long)bus->area->addr +
 				     i * SZ_64K;
-		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
+		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
 
 		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
 		if (err < 0) {
@@ -561,6 +585,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 	if (soc->has_pex_clkreq_en)
 		value |= AFI_PEX_CTRL_CLKREQ_EN;
 
+	value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
 	afi_writel(port->pcie, value, ctrl);
 
 	tegra_pcie_port_reset(port);
@@ -568,6 +594,7 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 
 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 {
+	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 	unsigned long value;
 
@@ -578,6 +605,10 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 
 	/* disable reference clock */
 	value = afi_readl(port->pcie, ctrl);
+
+	if (soc->has_pex_clkreq_en)
+		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
 	value &= ~AFI_PEX_CTRL_REFCLK_EN;
 	afi_writel(port->pcie, value, ctrl);
 }
@@ -626,13 +657,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
+	int err;
+	phys_addr_t io_start;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+	if (err < 0)
+		return err;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
+	if (err)
+		return err;
+
+	io_start = pci_pio_to_address(pcie->io.start);
 
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
 				sys->mem_offset);
 	pci_add_resource(&sys->resources, &pcie->busn);
 
-	pci_ioremap_io(nr * SZ_64K, pcie->io.start);
+	pci_ioremap_io(nr * SZ_64K, io_start);
 
 	return 1;
 }
@@ -684,9 +727,15 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
684 "Target abort", 727 "Target abort",
685 "Master abort", 728 "Master abort",
686 "Invalid write", 729 "Invalid write",
730 "Legacy interrupt",
687 "Response decoding error", 731 "Response decoding error",
688 "AXI response decoding error", 732 "AXI response decoding error",
689 "Transaction timeout", 733 "Transaction timeout",
734 "Slot present pin change",
735 "Slot clock request change",
736 "TMS clock ramp change",
737 "TMS ready for power down",
738 "Peer2Peer error",
690 }; 739 };
691 struct tegra_pcie *pcie = arg; 740 struct tegra_pcie *pcie = arg;
692 u32 code, signature; 741 u32 code, signature;
@@ -737,6 +786,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 {
 	u32 fpci_bar, size, axi_address;
+	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
 
 	/* Bar 0: type 1 extended configuration space */
 	fpci_bar = 0xfe100000;
@@ -749,7 +799,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 	/* Bar 1: downstream IO bar */
 	fpci_bar = 0xfdfc0000;
 	size = resource_size(&pcie->io);
-	axi_address = pcie->io.start;
+	axi_address = io_start;
 	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
 	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
 	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -792,30 +842,27 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 }
 
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 {
 	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
-	struct tegra_pcie_port *port;
-	unsigned int timeout;
-	unsigned long value;
+	u32 value;
 
-	/* power down PCIe slot clock bias pad */
-	if (soc->has_pex_bias_ctrl)
-		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+	timeout = jiffies + msecs_to_jiffies(timeout);
 
-	/* configure mode and disable all ports */
-	value = afi_readl(pcie, AFI_PCIE_CONFIG);
-	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
-	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
-
-	list_for_each_entry(port, &pcie->ports, list)
-		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+	while (time_before(jiffies, timeout)) {
+		value = pads_readl(pcie, soc->pads_pll_ctl);
+		if (value & PADS_PLL_CTL_LOCKDET)
+			return 0;
+	}
 
-	afi_writel(pcie, value, AFI_PCIE_CONFIG);
+	return -ETIMEDOUT;
+}
 
-	value = afi_readl(pcie, AFI_FUSE);
-	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
-	afi_writel(pcie, value, AFI_FUSE);
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	u32 value;
+	int err;
 
 	/* initialize internal PHY, enable up to 16 PCIE lanes */
 	pads_writel(pcie, 0x0, PADS_CTL_SEL);
@@ -834,6 +881,13 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
 	pads_writel(pcie, value, soc->pads_pll_ctl);
 
+	/* reset PLL */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	usleep_range(20, 100);
+
 	/* take PLL out of reset */
 	value = pads_readl(pcie, soc->pads_pll_ctl);
 	value |= PADS_PLL_CTL_RST_B4SM;
@@ -846,15 +900,11 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
846 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1); 900 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
847 901
848 /* wait for the PLL to lock */ 902 /* wait for the PLL to lock */
849 timeout = 300; 903 err = tegra_pcie_pll_wait(pcie, 500);
850 do { 904 if (err < 0) {
851 value = pads_readl(pcie, soc->pads_pll_ctl); 905 dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
852 usleep_range(1000, 2000); 906 return err;
853 if (--timeout == 0) { 907 }
854 pr_err("Tegra PCIe error: timeout waiting for PLL\n");
855 return -EBUSY;
856 }
857 } while (!(value & PADS_PLL_CTL_LOCKDET));
858 908
859 /* turn off IDDQ override */ 909 /* turn off IDDQ override */
860 value = pads_readl(pcie, PADS_CTL); 910 value = pads_readl(pcie, PADS_CTL);
@@ -866,6 +916,58 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
866 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L; 916 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
867 pads_writel(pcie, value, PADS_CTL); 917 pads_writel(pcie, value, PADS_CTL);
868 918
919 return 0;
920}
921
922static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
923{
924 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
925 struct tegra_pcie_port *port;
926 unsigned long value;
927 int err;
928
929 /* enable PLL power down */
930 if (pcie->phy) {
931 value = afi_readl(pcie, AFI_PLLE_CONTROL);
932 value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
933 value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
934 afi_writel(pcie, value, AFI_PLLE_CONTROL);
935 }
936
937 /* power down PCIe slot clock bias pad */
938 if (soc->has_pex_bias_ctrl)
939 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
940
941 /* configure mode and disable all ports */
942 value = afi_readl(pcie, AFI_PCIE_CONFIG);
943 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
944 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
945
946 list_for_each_entry(port, &pcie->ports, list)
947 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
948
949 afi_writel(pcie, value, AFI_PCIE_CONFIG);
950
951 if (soc->has_gen2) {
952 value = afi_readl(pcie, AFI_FUSE);
953 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
954 afi_writel(pcie, value, AFI_FUSE);
955 } else {
956 value = afi_readl(pcie, AFI_FUSE);
957 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
958 afi_writel(pcie, value, AFI_FUSE);
959 }
960
961 if (!pcie->phy)
962 err = tegra_pcie_phy_enable(pcie);
963 else
964 err = phy_power_on(pcie->phy);
965
966 if (err < 0) {
967 dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
968 return err;
969 }
970
869 /* take the PCIe interface module out of reset */ 971 /* take the PCIe interface module out of reset */
870 reset_control_deassert(pcie->pcie_xrst); 972 reset_control_deassert(pcie->pcie_xrst);
871 973
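The new tegra_pcie_pll_wait() is built on the kernel's standard jiffies-based polling idiom. A minimal sketch of the same pattern with hypothetical names (poll_until, done and ctx are illustrative, not part of the patch); note the final re-check, which avoids a spurious -ETIMEDOUT when the polling task is preempted past the deadline:

	#include <linux/jiffies.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Poll a condition until it holds or 'ms' milliseconds elapse. */
	static int poll_until(bool (*done)(void *ctx), void *ctx, unsigned int ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(ms);

		while (time_before(jiffies, deadline)) {
			if (done(ctx))
				return 0;
			cpu_relax();	/* busy-wait; could also usleep_range() */
		}

		/* re-check once: we may have been scheduled past the deadline */
		return done(ctx) ? 0 : -ETIMEDOUT;
	}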
@@ -899,6 +1001,10 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
 
 	/* TODO: disable and unprepare clocks? */
 
+	err = phy_power_off(pcie->phy);
+	if (err < 0)
+		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+
 	reset_control_assert(pcie->pcie_xrst);
 	reset_control_assert(pcie->afi_rst);
 	reset_control_assert(pcie->pex_rst);
@@ -1020,6 +1126,19 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
 		return err;
 	}
 
+	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+	if (IS_ERR(pcie->phy)) {
+		err = PTR_ERR(pcie->phy);
+		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
+		return err;
+	}
+
+	err = phy_init(pcie->phy);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+		return err;
+	}
+
 	err = tegra_pcie_power_on(pcie);
 	if (err) {
 		dev_err(&pdev->dev, "failed to power up: %d\n", err);
@@ -1078,10 +1197,17 @@ poweroff:
 
 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
 {
+	int err;
+
 	if (pcie->irq > 0)
 		free_irq(pcie->irq, pcie);
 
 	tegra_pcie_power_off(pcie);
+
+	err = phy_exit(pcie->phy);
+	if (err < 0)
+		dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+
 	return 0;
 }
 
@@ -1170,8 +1296,10 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 		return hwirq;
 
 	irq = irq_create_mapping(msi->domain, hwirq);
-	if (!irq)
+	if (!irq) {
+		tegra_msi_free(msi, hwirq);
 		return -EINVAL;
+	}
 
 	irq_set_msi_desc(irq, desc);
 
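Both MSI hunks tighten the hwirq lifecycle: setup now releases the hwirq when irq_create_mapping() fails, and teardown (next hunk) caches the hardware number before disposing of the mapping, because irq_dispose_mapping() frees the irq_data that d->hwirq lives in. In sketch form (the comments are editorial; the calls mirror the patch):

	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));

	irq_dispose_mapping(irq);	/* irq_data is invalid after this */
	tegra_msi_free(msi, hwirq);	/* safe: uses the cached copy */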
@@ -1189,8 +1317,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
 {
 	struct tegra_msi *msi = to_tegra_msi(chip);
 	struct irq_data *d = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	tegra_msi_free(msi, d->hwirq);
+	irq_dispose_mapping(irq);
+	tegra_msi_free(msi, hwirq);
 }
 
 static struct irq_chip tegra_msi_irq_chip = {
@@ -1327,7 +1457,19 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
 {
 	struct device_node *np = pcie->dev->of_node;
 
-	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+		switch (lanes) {
+		case 0x0000104:
+			dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+			return 0;
+
+		case 0x0000102:
+			dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+			return 0;
+		}
+	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
 		switch (lanes) {
 		case 0x00000204:
 			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
@@ -1435,7 +1577,23 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
 	struct device_node *np = pcie->dev->of_node;
 	unsigned int i = 0;
 
-	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+		pcie->num_supplies = 7;
+
+		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "avddio-pex";
+		pcie->supplies[i++].supply = "dvddio-pex";
+		pcie->supplies[i++].supply = "avdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex";
+		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+		pcie->supplies[i++].supply = "vddio-pex-ctl";
+		pcie->supplies[i++].supply = "avdd-pll-erefe";
+	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
 		bool need_pexa = false, need_pexb = false;
 
 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
@@ -1514,32 +1672,50 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 	struct resource res;
 	int err;
 
+	memset(&pcie->all, 0, sizeof(pcie->all));
+	pcie->all.flags = IORESOURCE_MEM;
+	pcie->all.name = np->full_name;
+	pcie->all.start = ~0;
+	pcie->all.end = 0;
+
 	if (of_pci_range_parser_init(&parser, np)) {
 		dev_err(pcie->dev, "missing \"ranges\" property\n");
 		return -EINVAL;
 	}
 
 	for_each_of_pci_range(&parser, &range) {
-		of_pci_range_to_resource(&range, np, &res);
+		err = of_pci_range_to_resource(&range, np, &res);
+		if (err < 0)
+			return err;
 
 		switch (res.flags & IORESOURCE_TYPE_BITS) {
 		case IORESOURCE_IO:
 			memcpy(&pcie->io, &res, sizeof(res));
-			pcie->io.name = "I/O";
+			pcie->io.name = np->full_name;
 			break;
 
 		case IORESOURCE_MEM:
 			if (res.flags & IORESOURCE_PREFETCH) {
 				memcpy(&pcie->prefetch, &res, sizeof(res));
-				pcie->prefetch.name = "PREFETCH";
+				pcie->prefetch.name = "prefetchable";
 			} else {
 				memcpy(&pcie->mem, &res, sizeof(res));
-				pcie->mem.name = "MEM";
+				pcie->mem.name = "non-prefetchable";
 			}
 			break;
 		}
+
+		if (res.start <= pcie->all.start)
+			pcie->all.start = res.start;
+
+		if (res.end >= pcie->all.end)
+			pcie->all.end = res.end;
 	}
 
+	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
+	if (err < 0)
+		return err;
+
 	err = of_pci_parse_bus_range(np, &pcie->busn);
 	if (err < 0) {
 		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -1641,6 +1817,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
 	unsigned int retries = 3;
 	unsigned long value;
 
+	/* override presence detection */
+	value = readl(port->base + RP_PRIV_MISC);
+	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+	writel(value, port->base + RP_PRIV_MISC);
+
 	do {
 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
 
@@ -1721,6 +1903,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
 	.has_pex_bias_ctrl = false,
 	.has_intr_prsnt_sense = false,
 	.has_cml_clk = false,
+	.has_gen2 = false,
 };
 
 static const struct tegra_pcie_soc_data tegra30_pcie_data = {
@@ -1732,9 +1915,23 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
 	.has_pex_bias_ctrl = true,
 	.has_intr_prsnt_sense = true,
 	.has_cml_clk = true,
+	.has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+	.num_ports = 2,
+	.msi_base_shift = 8,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = true,
+	.has_gen2 = true,
 };
 
 static const struct of_device_id tegra_pcie_of_match[] = {
+	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
 	{ },
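The tegra124 entry is listed ahead of the older SoCs so the most specific compatible wins, and probe picks up the per-SoC parameters through the .data pointer. A sketch of the usual retrieval pattern (the driver's probe body is outside this hunk; soc_data is an illustrative name):

	const struct of_device_id *match;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (match)
		soc_data = match->data;	/* e.g. &tegra124_pcie_data */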
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
new file mode 100644
index 000000000000..9ecabfa8c634
--- /dev/null
+++ b/drivers/pci/host/pci-xgene.c
@@ -0,0 +1,659 @@
1/**
2 * APM X-Gene PCIe Driver
3 *
4 * Copyright (c) 2014 Applied Micro Circuits Corporation.
5 *
6 * Author: Tanmay Inamdar <tinamdar@apm.com>.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19#include <linux/clk-private.h>
20#include <linux/delay.h>
21#include <linux/io.h>
22#include <linux/jiffies.h>
23#include <linux/memblock.h>
24#include <linux/module.h>
25#include <linux/of.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_pci.h>
29#include <linux/pci.h>
30#include <linux/platform_device.h>
31#include <linux/slab.h>
32
33#define PCIECORE_CTLANDSTATUS 0x50
34#define PIM1_1L 0x80
35#define IBAR2 0x98
36#define IR2MSK 0x9c
37#define PIM2_1L 0xa0
38#define IBAR3L 0xb4
39#define IR3MSKL 0xbc
40#define PIM3_1L 0xc4
41#define OMR1BARL 0x100
42#define OMR2BARL 0x118
43#define OMR3BARL 0x130
44#define CFGBARL 0x154
45#define CFGBARH 0x158
46#define CFGCTL 0x15c
47#define RTDID 0x160
48#define BRIDGE_CFG_0 0x2000
49#define BRIDGE_CFG_4 0x2010
50#define BRIDGE_STATUS_0 0x2600
51
52#define LINK_UP_MASK 0x00000100
53#define AXI_EP_CFG_ACCESS 0x10000
54#define EN_COHERENCY 0xF0000000
55#define EN_REG 0x00000001
56#define OB_LO_IO 0x00000002
57#define XGENE_PCIE_VENDORID 0x10E8
58#define XGENE_PCIE_DEVICEID 0xE004
59#define SZ_1T (SZ_1G*1024ULL)
60#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
61
62struct xgene_pcie_port {
63 struct device_node *node;
64 struct device *dev;
65 struct clk *clk;
66 void __iomem *csr_base;
67 void __iomem *cfg_base;
68 unsigned long cfg_addr;
69 bool link_up;
70};
71
72static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
73{
74 return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
75}
76
77/* PCIe Configuration Out/In */
78static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
79{
80 writel(val, addr + offset);
81}
82
83static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
84{
85 u32 val32 = readl(addr + (offset & ~0x3));
86
87 switch (offset & 0x3) {
88 case 2:
89 val32 &= ~0xFFFF0000;
90 val32 |= (u32)val << 16;
91 break;
92 case 0:
93 default:
94 val32 &= ~0xFFFF;
95 val32 |= val;
96 break;
97 }
98 writel(val32, addr + (offset & ~0x3));
99}
100
101static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
102{
103 u32 val32 = readl(addr + (offset & ~0x3));
104
105 switch (offset & 0x3) {
106 case 0:
107 val32 &= ~0xFF;
108 val32 |= val;
109 break;
110 case 1:
111 val32 &= ~0xFF00;
112 val32 |= (u32)val << 8;
113 break;
114 case 2:
115 val32 &= ~0xFF0000;
116 val32 |= (u32)val << 16;
117 break;
118 case 3:
119 default:
120 val32 &= ~0xFF000000;
121 val32 |= (u32)val << 24;
122 break;
123 }
124 writel(val32, addr + (offset & ~0x3));
125}
126
127static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
128{
129 *val = readl(addr + offset);
130}
131
132static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
133{
134 *val = readl(addr + (offset & ~0x3));
135
136 switch (offset & 0x3) {
137 case 2:
138 *val >>= 16;
139 break;
140 }
141
142 *val &= 0xFFFF;
143}
144
145static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
146{
147 *val = readl(addr + (offset & ~0x3));
148
149 switch (offset & 0x3) {
150 case 3:
151 *val = *val >> 24;
152 break;
153 case 2:
154 *val = *val >> 16;
155 break;
156 case 1:
157 *val = *val >> 8;
158 break;
159 }
160 *val &= 0xFF;
161}
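These accessors emulate sub-word config accesses on a bridge that only performs aligned 32-bit transfers: the enclosing word is read, the target byte or half-word is masked in (or out), and the word is written back. The switch statements above reduce to a generic shift/mask form (rmw_write is a hypothetical helper, not in the driver):

	static void rmw_write(void __iomem *base, int offset, u32 val, int width)
	{
		void __iomem *aligned = base + (offset & ~0x3);
		int shift = (offset & 0x3) * 8;	/* bit position inside the word */
		u32 mask = (width == 32) ? ~0U : (((1U << width) - 1) << shift);
		u32 word = readl(aligned);

		word = (word & ~mask) | ((val << shift) & mask);
		writel(word, aligned);
	}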
162
163/*
164 * When address bits [17:16] are 2'b01, the configuration access is
165 * treated as Type 1 and forwarded to the external PCIe device.
166 */
167static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
168{
169 struct xgene_pcie_port *port = bus->sysdata;
170
171 if (bus->number >= (bus->primary + 1))
172 return port->cfg_base + AXI_EP_CFG_ACCESS;
173
174 return port->cfg_base;
175}
176
177/*
178 * For a configuration request, the RTDID register supplies the bus,
179 * device and function numbers used in the header fields.
180 */
181static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
182{
183 struct xgene_pcie_port *port = bus->sysdata;
184 unsigned int b, d, f;
185 u32 rtdid_val = 0;
186
187 b = bus->number;
188 d = PCI_SLOT(devfn);
189 f = PCI_FUNC(devfn);
190
191 if (!pci_is_root_bus(bus))
192 rtdid_val = (b << 8) | (d << 3) | f;
193
194 writel(rtdid_val, port->csr_base + RTDID);
195 /* read the register back to ensure flush */
196 readl(port->csr_base + RTDID);
197}
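The value written to RTDID is the standard PCI requester-ID layout: bus in bits [15:8], device in [7:3], function in [2:0]. The core expresses the same encoding as PCI_DEVID(); for example, bus 0x01, device 0x02, function 0 yields (0x01 << 8) | (0x02 << 3) | 0 = 0x0110:

	u16 rid = PCI_DEVID(bus->number, devfn);	/* (bus << 8) | devfn */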
198
199/*
200 * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space
201 * for the translation from the PCI bus to the native bus. The entire
202 * DDR region is mapped into PCIe space through these registers so it
203 * can be reached by DMA from EP devices. BAR0/1 of the bridge must be
204 * hidden during enumeration to avoid sizing and resource allocation
205 * by the PCI core.
206 */
207static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
208{
209 if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
210 (offset == PCI_BASE_ADDRESS_1)))
211 return true;
212
213 return false;
214}
215
216static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
217 int offset, int len, u32 *val)
218{
219 struct xgene_pcie_port *port = bus->sysdata;
220 void __iomem *addr;
221
222 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
223 return PCIBIOS_DEVICE_NOT_FOUND;
224
225 if (xgene_pcie_hide_rc_bars(bus, offset)) {
226 *val = 0;
227 return PCIBIOS_SUCCESSFUL;
228 }
229
230 xgene_pcie_set_rtdid_reg(bus, devfn);
231 addr = xgene_pcie_get_cfg_base(bus);
232 switch (len) {
233 case 1:
234 xgene_pcie_cfg_in8(addr, offset, val);
235 break;
236 case 2:
237 xgene_pcie_cfg_in16(addr, offset, val);
238 break;
239 default:
240 xgene_pcie_cfg_in32(addr, offset, val);
241 break;
242 }
243
244 return PCIBIOS_SUCCESSFUL;
245}
246
247static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
248 int offset, int len, u32 val)
249{
250 struct xgene_pcie_port *port = bus->sysdata;
251 void __iomem *addr;
252
253 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
254 return PCIBIOS_DEVICE_NOT_FOUND;
255
256 if (xgene_pcie_hide_rc_bars(bus, offset))
257 return PCIBIOS_SUCCESSFUL;
258
259 xgene_pcie_set_rtdid_reg(bus, devfn);
260 addr = xgene_pcie_get_cfg_base(bus);
261 switch (len) {
262 case 1:
263 xgene_pcie_cfg_out8(addr, offset, (u8)val);
264 break;
265 case 2:
266 xgene_pcie_cfg_out16(addr, offset, (u16)val);
267 break;
268 default:
269 xgene_pcie_cfg_out32(addr, offset, val);
270 break;
271 }
272
273 return PCIBIOS_SUCCESSFUL;
274}
275
276static struct pci_ops xgene_pcie_ops = {
277 .read = xgene_pcie_read_config,
278 .write = xgene_pcie_write_config
279};
280
281static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
282 u32 flags, u64 size)
283{
284 u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
285 u32 val32 = 0;
286 u32 val;
287
288 val32 = readl(csr_base + addr);
289 val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
290 writel(val, csr_base + addr);
291
292 val32 = readl(csr_base + addr + 0x04);
293 val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
294 writel(val, csr_base + addr + 0x04);
295
296 val32 = readl(csr_base + addr + 0x04);
297 val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
298 writel(val, csr_base + addr + 0x04);
299
300 val32 = readl(csr_base + addr + 0x08);
301 val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
302 writel(val, csr_base + addr + 0x08);
303
304 return mask;
305}
306
307static void xgene_pcie_linkup(struct xgene_pcie_port *port,
308 u32 *lanes, u32 *speed)
309{
310 void __iomem *csr_base = port->csr_base;
311 u32 val32;
312
313 port->link_up = false;
314 val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
315 if (val32 & LINK_UP_MASK) {
316 port->link_up = true;
317 *speed = PIPE_PHY_RATE_RD(val32);
318 val32 = readl(csr_base + BRIDGE_STATUS_0);
319 *lanes = val32 >> 26;
320 }
321}
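PIPE_PHY_RATE_RD() pulls the PHY rate out of bits [15:14] (mask 0xc000, shift 14), and the negotiated width comes from bits [31:26] of BRIDGE_STATUS_0. A worked decode with illustrative register values:

	u32 speed = PIPE_PHY_RATE_RD(0x4100);	/* (0xc000 & 0x4100) >> 14 = 1 */
	u32 lanes = 0x10000000 >> 26;		/* = 4, an x4 link */

	/* xgene_pcie_setup() would then print "(rc) x4 gen-2" (speed + 1). */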
322
323static int xgene_pcie_init_port(struct xgene_pcie_port *port)
324{
325 int rc;
326
327 port->clk = clk_get(port->dev, NULL);
328 if (IS_ERR(port->clk)) {
329 dev_err(port->dev, "clock not available\n");
330 return -ENODEV;
331 }
332
333 rc = clk_prepare_enable(port->clk);
334 if (rc) {
335 dev_err(port->dev, "clock enable failed\n");
336 return rc;
337 }
338
339 return 0;
340}
341
342static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
343 struct platform_device *pdev)
344{
345 struct resource *res;
346
347 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
348 port->csr_base = devm_ioremap_resource(port->dev, res);
349 if (IS_ERR(port->csr_base))
350 return PTR_ERR(port->csr_base);
351
352 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
353 port->cfg_base = devm_ioremap_resource(port->dev, res);
354 if (IS_ERR(port->cfg_base))
355 return PTR_ERR(port->cfg_base);
356 port->cfg_addr = res->start;
357
358 return 0;
359}
360
361static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
362 struct resource *res, u32 offset,
363 u64 cpu_addr, u64 pci_addr)
364{
365 void __iomem *base = port->csr_base + offset;
366 resource_size_t size = resource_size(res);
367 u64 restype = resource_type(res);
368 u64 mask = 0;
369 u32 min_size;
370 u32 flag = EN_REG;
371
372 if (restype == IORESOURCE_MEM) {
373 min_size = SZ_128M;
374 } else {
375 min_size = 128;
376 flag |= OB_LO_IO;
377 }
378
379 if (size >= min_size)
380 mask = ~(size - 1) | flag;
381 else
382 dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
383 (u64)size, min_size);
384
385 writel(lower_32_bits(cpu_addr), base);
386 writel(upper_32_bits(cpu_addr), base + 0x04);
387 writel(lower_32_bits(mask), base + 0x08);
388 writel(upper_32_bits(mask), base + 0x0c);
389 writel(lower_32_bits(pci_addr), base + 0x10);
390 writel(upper_32_bits(pci_addr), base + 0x14);
391}
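The outbound mask follows the usual power-of-two translation scheme: for a window of size S, ~(S - 1) clears the offset bits that pass through untranslated, and the enable flag rides in the low word. A worked example, assuming a 256 MB memory window:

	u64 size = SZ_256M;			/* 0x10000000 */
	u64 mask = ~(size - 1) | EN_REG;	/* 0xfffffffff0000001 */

Sizes below min_size leave mask at 0, which keeps the region disabled rather than programming a bogus translation.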
392
393static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
394{
395 writel(lower_32_bits(addr), csr_base + CFGBARL);
396 writel(upper_32_bits(addr), csr_base + CFGBARH);
397 writel(EN_REG, csr_base + CFGCTL);
398}
399
400static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
401 struct list_head *res,
402 resource_size_t io_base)
403{
404 struct pci_host_bridge_window *window;
405 struct device *dev = port->dev;
406 int ret;
407
408 list_for_each_entry(window, res, list) {
409 struct resource *res = window->res;
410 u64 restype = resource_type(res);
411
412 dev_dbg(port->dev, "%pR\n", res);
413
414 switch (restype) {
415 case IORESOURCE_IO:
416 xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
417 res->start - window->offset);
418 ret = pci_remap_iospace(res, io_base);
419 if (ret < 0)
420 return ret;
421 break;
422 case IORESOURCE_MEM:
423 xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
424 res->start - window->offset);
425 break;
426 case IORESOURCE_BUS:
427 break;
428 default:
429 dev_err(dev, "invalid resource %pR\n", res);
430 return -EINVAL;
431 }
432 }
433 xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
434
435 return 0;
436}
437
438static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
439{
440 writel(lower_32_bits(pim), addr);
441 writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
442 writel(lower_32_bits(size), addr + 0x10);
443 writel(upper_32_bits(size), addr + 0x14);
444}
445
446/*
447 * X-Gene PCIe supports at most 3 inbound memory regions. This
448 * function selects a region based on the size of the range.
449 */
450static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
451{
452 if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
453 *ib_reg_mask |= (1 << 1);
454 return 1;
455 }
456
457 if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
458 *ib_reg_mask |= (1 << 0);
459 return 0;
460 }
461
462 if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
463 *ib_reg_mask |= (1 << 2);
464 return 2;
465 }
466
467 return -EINVAL;
468}
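Selection is first-fit in the order region 1 (4 B to 16 MB), region 0 (1 KB to 1 TB), region 2 (1 MB to 1 TB), with *ib_reg_mask remembering what is already taken. A walk-through with three hypothetical dma-ranges sizes:

	u8 mask = 0;

	xgene_pcie_select_ib_reg(&mask, SZ_8M);		/* -> 1 (fits, free) */
	xgene_pcie_select_ib_reg(&mask, 512ULL * SZ_1G);	/* -> 0 (too big for 1) */
	xgene_pcie_select_ib_reg(&mask, 64ULL * SZ_1G);	/* -> 2 (0 and 1 taken) */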
469
470static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
471 struct of_pci_range *range, u8 *ib_reg_mask)
472{
473 void __iomem *csr_base = port->csr_base;
474 void __iomem *cfg_base = port->cfg_base;
475 void *bar_addr;
476 void *pim_addr;
477 u64 cpu_addr = range->cpu_addr;
478 u64 pci_addr = range->pci_addr;
479 u64 size = range->size;
480 u64 mask = ~(size - 1) | EN_REG;
481 u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
482 u32 bar_low;
483 int region;
484
485 region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
486 if (region < 0) {
487 dev_warn(port->dev, "invalid pcie dma-range config\n");
488 return;
489 }
490
491 if (range->flags & IORESOURCE_PREFETCH)
492 flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
493
494 bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
495 switch (region) {
496 case 0:
497 xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
498 bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
499 writel(bar_low, bar_addr);
500 writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
501 pim_addr = csr_base + PIM1_1L;
502 break;
503 case 1:
504 bar_addr = csr_base + IBAR2;
505 writel(bar_low, bar_addr);
506 writel(lower_32_bits(mask), csr_base + IR2MSK);
507 pim_addr = csr_base + PIM2_1L;
508 break;
509 case 2:
510 bar_addr = csr_base + IBAR3L;
511 writel(bar_low, bar_addr);
512 writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
513 writel(lower_32_bits(mask), csr_base + IR3MSKL);
514 writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
515 pim_addr = csr_base + PIM3_1L;
516 break;
517 }
518
519 xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
520}
521
522static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
523 struct device_node *node)
524{
525 const int na = 3, ns = 2;
526 int rlen;
527
528 parser->node = node;
529 parser->pna = of_n_addr_cells(node);
530 parser->np = parser->pna + na + ns;
531
532 parser->range = of_get_property(node, "dma-ranges", &rlen);
533 if (!parser->range)
534 return -ENOENT;
535 parser->end = parser->range + rlen / sizeof(__be32);
536
537 return 0;
538}
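Each dma-ranges entry is laid out as a 3-cell PCI address (a flags word plus a 64-bit address), a parent address of pna cells, and a 2-cell size, so the stride is of_n_addr_cells() + 5 cells. A hypothetical entry for a parent with #address-cells = <2> (7 cells total), mapping a 64-bit prefetchable 2 GB region one-to-one:

	/*
	 *	dma-ranges = <0x43000000 0x80 0x00000000
	 *		      0x80 0x00000000
	 *		      0x00 0x80000000>;
	 */
	int np = of_n_addr_cells(node) + 3 + 2;	/* 2 + 3 + 2 = 7 */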
539
540static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
541{
542 struct device_node *np = port->node;
543 struct of_pci_range range;
544 struct of_pci_range_parser parser;
545 struct device *dev = port->dev;
546 u8 ib_reg_mask = 0;
547
548 if (pci_dma_range_parser_init(&parser, np)) {
549 dev_err(dev, "missing dma-ranges property\n");
550 return -EINVAL;
551 }
552
553 /* Get the dma-ranges from DT */
554 for_each_of_pci_range(&parser, &range) {
555 u64 end = range.cpu_addr + range.size - 1;
556
557 dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
558 range.flags, range.cpu_addr, end, range.pci_addr);
559 xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
560 }
561 return 0;
562}
563
564/* clear BAR configuration which was done by firmware */
565static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
566{
567 int i;
568
569 for (i = PIM1_1L; i <= CFGCTL; i += 4)
570 writel(0x0, port->csr_base + i);
571}
572
573static int xgene_pcie_setup(struct xgene_pcie_port *port,
574 struct list_head *res,
575 resource_size_t io_base)
576{
577 u32 val, lanes = 0, speed = 0;
578 int ret;
579
580 xgene_pcie_clear_config(port);
581
582 /* setup the vendor and device IDs correctly */
583 val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
584 writel(val, port->csr_base + BRIDGE_CFG_0);
585
586 ret = xgene_pcie_map_ranges(port, res, io_base);
587 if (ret)
588 return ret;
589
590 ret = xgene_pcie_parse_map_dma_ranges(port);
591 if (ret)
592 return ret;
593
594 xgene_pcie_linkup(port, &lanes, &speed);
595 if (!port->link_up)
596 dev_info(port->dev, "(rc) link down\n");
597 else
598 dev_info(port->dev, "(rc) x%d gen-%d link up\n",
599 lanes, speed + 1);
600 return 0;
601}
602
603static int xgene_pcie_probe_bridge(struct platform_device *pdev)
604{
605 struct device_node *dn = pdev->dev.of_node;
606 struct xgene_pcie_port *port;
607 resource_size_t iobase = 0;
608 struct pci_bus *bus;
609 int ret;
610 LIST_HEAD(res);
611
612 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
613 if (!port)
614 return -ENOMEM;
615 port->node = of_node_get(pdev->dev.of_node);
616 port->dev = &pdev->dev;
617
618 ret = xgene_pcie_map_reg(port, pdev);
619 if (ret)
620 return ret;
621
622 ret = xgene_pcie_init_port(port);
623 if (ret)
624 return ret;
625
626 ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
627 if (ret)
628 return ret;
629
630 ret = xgene_pcie_setup(port, &res, iobase);
631 if (ret)
632 return ret;
633
634 bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res);
635 if (!bus)
636 return -ENOMEM;
637
638 platform_set_drvdata(pdev, port);
639 return 0;
640}
641
642static const struct of_device_id xgene_pcie_match_table[] = {
643 {.compatible = "apm,xgene-pcie",},
644 {},
645};
646
647static struct platform_driver xgene_pcie_driver = {
648 .driver = {
649 .name = "xgene-pcie",
650 .owner = THIS_MODULE,
651 .of_match_table = of_match_ptr(xgene_pcie_match_table),
652 },
653 .probe = xgene_pcie_probe_bridge,
654};
655module_platform_driver(xgene_pcie_driver);
656
657MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
658MODULE_DESCRIPTION("APM X-Gene PCIe driver");
659MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52bd3a143563..dfed00aa3ac0 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@ static unsigned long global_io_offset;
 
 static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
 {
+	BUG_ON(!sys->private_data);
+
 	return sys->private_data;
 }
 
@@ -194,30 +196,6 @@ void dw_pcie_msi_init(struct pcie_port *pp)
 	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
 }
 
-static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
-{
-	int flag = 1;
-
-	do {
-		pos = find_next_zero_bit(pp->msi_irq_in_use,
-				MAX_MSI_IRQS, pos);
-		/*if you have reached to the end then get out from here.*/
-		if (pos == MAX_MSI_IRQS)
-			return -ENOSPC;
-		/*
-		 * Check if this position is at correct offset.nvec is always a
-		 * power of two. pos0 must be nvec bit aligned.
-		 */
-		if (pos % msgvec)
-			pos += msgvec - (pos % msgvec);
-		else
-			flag = 0;
-	} while (flag);
-
-	*pos0 = pos;
-	return 0;
-}
-
 static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 {
 	unsigned int res, bit, val;
@@ -236,13 +214,14 @@ static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
 
 	for (i = 0; i < nvec; i++) {
 		irq_set_msi_desc_off(irq_base, i, NULL);
-		clear_bit(pos + i, pp->msi_irq_in_use);
 		/* Disable corresponding interrupt on MSI controller */
 		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
 		else
 			dw_pcie_msi_clear_irq(pp, pos + i);
 	}
+
+	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
 }
 
 static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -258,31 +237,13 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
 
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
-	int irq, pos0, pos1, i;
+	int irq, pos0, i;
 	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
-	pos0 = find_first_zero_bit(pp->msi_irq_in_use,
-			MAX_MSI_IRQS);
-	if (pos0 % no_irqs) {
-		if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
-			goto no_valid_irq;
-	}
-	if (no_irqs > 1) {
-		pos1 = find_next_bit(pp->msi_irq_in_use,
-				MAX_MSI_IRQS, pos0);
-		/* there must be nvec number of consecutive free bits */
-		while ((pos1 - pos0) < no_irqs) {
-			if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
-				goto no_valid_irq;
-			pos1 = find_next_bit(pp->msi_irq_in_use,
-					MAX_MSI_IRQS, pos0);
-		}
-	}
+	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
+				       order_base_2(no_irqs));
+	if (pos0 < 0)
+		goto no_valid_irq;
 
 	irq = irq_find_mapping(pp->irq_domain, pos0);
 	if (!irq)
@@ -300,7 +261,6 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 		clear_irq_range(pp, irq, i, pos0);
 		goto no_valid_irq;
 	}
-	set_bit(pos0 + i, pp->msi_irq_in_use);
 	/*Enable corresponding interrupt in MSI interrupt controller */
 	if (pp->ops->msi_set_irq)
 		pp->ops->msi_set_irq(pp, pos0 + i);
@@ -316,69 +276,28 @@ no_valid_irq:
 	return -ENOSPC;
 }
 
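bitmap_find_free_region() replaces the open-coded search: it finds a free, naturally aligned block of 2^order bits and marks it allocated in one step, which is exactly the alignment rule multi-MSI requires. The matching release in clear_irq_range() uses the same order. A self-contained sketch with an illustrative 4-vector request:

	DECLARE_BITMAP(in_use, MAX_MSI_IRQS);
	int nvec = 4;
	int pos;

	pos = bitmap_find_free_region(in_use, MAX_MSI_IRQS,
				      order_base_2(nvec));	/* order 2 */
	if (pos >= 0)
		bitmap_release_region(in_use, pos, order_base_2(nvec));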
-static void clear_irq(unsigned int irq)
-{
-	unsigned int pos, nvec;
-	struct msi_desc *msi;
-	struct pcie_port *pp;
-	struct irq_data *data = irq_get_irq_data(irq);
-
-	/* get the port structure */
-	msi = irq_data_get_msi(data);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
-	if (!pp) {
-		BUG();
-		return;
-	}
-
-	/* undo what was done in assign_irq */
-	pos = data->hwirq;
-	nvec = 1 << msi->msi_attrib.multiple;
-
-	clear_irq_range(pp, irq, nvec, pos);
-
-	/* all irqs cleared; reset attributes */
-	msi->irq = 0;
-	msi->msi_attrib.multiple = 0;
-}
-
 static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 			struct msi_desc *desc)
 {
-	int irq, pos, msgvec;
-	u16 msg_ctr;
+	int irq, pos;
 	struct msi_msg msg;
 	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
-	pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
-			&msg_ctr);
-	msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
-	if (msgvec == 0)
-		msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
-	if (msgvec > 5)
-		msgvec = 0;
-
-	irq = assign_irq((1 << msgvec), desc, &pos);
+	irq = assign_irq(1, desc, &pos);
 	if (irq < 0)
 		return irq;
 
-	/*
-	 * write_msi_msg() will update PCI_MSI_FLAGS so there is
-	 * no need to explicitly call pci_write_config_word().
-	 */
-	desc->msi_attrib.multiple = msgvec;
-
-	if (pp->ops->get_msi_data)
-		msg.address_lo = pp->ops->get_msi_data(pp);
+	if (pp->ops->get_msi_addr)
+		msg.address_lo = pp->ops->get_msi_addr(pp);
 	else
 		msg.address_lo = virt_to_phys((void *)pp->msi_data);
 	msg.address_hi = 0x0;
-	msg.data = pos;
+
+	if (pp->ops->get_msi_data)
+		msg.data = pp->ops->get_msi_data(pp, pos);
+	else
+		msg.data = pos;
+
 	write_msi_msg(irq, &msg);
 
 	return 0;
@@ -386,7 +305,11 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 
 static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
 {
-	clear_irq(irq);
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct msi_desc *msi = irq_data_get_msi(data);
+	struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+
+	clear_irq_range(pp, irq, 1, data->hwirq);
 }
 
 static struct msi_chip dw_pcie_msi_chip = {
@@ -425,7 +348,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	struct resource *cfg_res;
 	u32 val, na, ns;
 	const __be32 *addrp;
-	int i, index;
+	int i, index, ret;
 
 	/* Find the address cell size and the number of cells in order to get
 	 * the untranslated address.
@@ -435,16 +358,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 
 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
 	if (cfg_res) {
-		pp->config.cfg0_size = resource_size(cfg_res)/2;
-		pp->config.cfg1_size = resource_size(cfg_res)/2;
+		pp->cfg0_size = resource_size(cfg_res)/2;
+		pp->cfg1_size = resource_size(cfg_res)/2;
 		pp->cfg0_base = cfg_res->start;
-		pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
 
 		/* Find the untranslated configuration space address */
 		index = of_property_match_string(np, "reg-names", "config");
-		addrp = of_get_address(np, index, false, false);
+		addrp = of_get_address(np, index, NULL, NULL);
 		pp->cfg0_mod_base = of_read_number(addrp, ns);
-		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
 	} else {
 		dev_err(pp->dev, "missing *config* reg space\n");
 	}
@@ -466,9 +389,9 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 			pp->io.end = min_t(resource_size_t,
 					   IO_SPACE_LIMIT,
 					   range.pci_addr + range.size
-					   + global_io_offset);
-			pp->config.io_size = resource_size(&pp->io);
-			pp->config.io_bus_addr = range.pci_addr;
+					   + global_io_offset - 1);
+			pp->io_size = resource_size(&pp->io);
+			pp->io_bus_addr = range.pci_addr;
 			pp->io_base = range.cpu_addr;
 
 			/* Find the untranslated IO space address */
@@ -478,8 +401,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 		if (restype == IORESOURCE_MEM) {
 			of_pci_range_to_resource(&range, np, &pp->mem);
 			pp->mem.name = "MEM";
-			pp->config.mem_size = resource_size(&pp->mem);
-			pp->config.mem_bus_addr = range.pci_addr;
+			pp->mem_size = resource_size(&pp->mem);
+			pp->mem_bus_addr = range.pci_addr;
 
 			/* Find the untranslated MEM space address */
 			pp->mem_mod_base = of_read_number(parser.range -
@@ -487,19 +410,29 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 		}
 		if (restype == 0) {
 			of_pci_range_to_resource(&range, np, &pp->cfg);
-			pp->config.cfg0_size = resource_size(&pp->cfg)/2;
-			pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+			pp->cfg0_size = resource_size(&pp->cfg)/2;
+			pp->cfg1_size = resource_size(&pp->cfg)/2;
 			pp->cfg0_base = pp->cfg.start;
-			pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+			pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
 
 			/* Find the untranslated configuration space address */
 			pp->cfg0_mod_base = of_read_number(parser.range -
 							   parser.np + na, ns);
 			pp->cfg1_mod_base = pp->cfg0_mod_base +
-					    pp->config.cfg0_size;
+					    pp->cfg0_size;
 		}
 	}
 
+	ret = of_pci_parse_bus_range(np, &pp->busn);
+	if (ret < 0) {
+		pp->busn.name = np->name;
+		pp->busn.start = 0;
+		pp->busn.end = 0xff;
+		pp->busn.flags = IORESOURCE_BUS;
+		dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
+			ret, &pp->busn);
+	}
+
 	if (!pp->dbi_base) {
 		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
 					    resource_size(&pp->cfg));
@@ -511,17 +444,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 
 	pp->mem_base = pp->mem.start;
 
-	pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
-					pp->config.cfg0_size);
 	if (!pp->va_cfg0_base) {
-		dev_err(pp->dev, "error with ioremap in function\n");
-		return -ENOMEM;
+		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+						pp->cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(pp->dev, "error with ioremap in function\n");
+			return -ENOMEM;
+		}
 	}
-	pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
-					pp->config.cfg1_size);
+
 	if (!pp->va_cfg1_base) {
-		dev_err(pp->dev, "error with ioremap\n");
-		return -ENOMEM;
+		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+						pp->cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(pp->dev, "error with ioremap\n");
+			return -ENOMEM;
+		}
 	}
 
 	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
@@ -530,16 +468,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
-					MAX_MSI_IRQS, &msi_domain_ops,
-					&dw_pcie_msi_chip);
-		if (!pp->irq_domain) {
-			dev_err(pp->dev, "irq domain init failed\n");
-			return -ENXIO;
-		}
+		if (!pp->ops->msi_host_init) {
+			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+						MAX_MSI_IRQS, &msi_domain_ops,
+						&dw_pcie_msi_chip);
+			if (!pp->irq_domain) {
+				dev_err(pp->dev, "irq domain init failed\n");
+				return -ENXIO;
+			}
 
-		for (i = 0; i < MAX_MSI_IRQS; i++)
-			irq_create_mapping(pp->irq_domain, i);
+			for (i = 0; i < MAX_MSI_IRQS; i++)
+				irq_create_mapping(pp->irq_domain, i);
+		} else {
+			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	if (pp->ops->host_init)
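The new msi_host_init hook lets a glue driver (TI Keystone in this series, per the merge summary) take over MSI setup instead of using the generic irq domain. A hypothetical implementation shape, with illustrative names:

	static int foo_pcie_msi_host_init(struct pcie_port *pp,
					  struct msi_chip *chip)
	{
		/* register a platform-specific irq domain / MSI chip here */
		return 0;
	}

	static struct pcie_host_ops foo_pcie_host_ops = {
		.msi_host_init = foo_pcie_msi_host_init,
	};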
@@ -558,7 +502,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	dw_pci.private_data = (void **)&pp;
 
 	pci_common_init_dev(pp->dev, &dw_pci);
-	pci_assign_unassigned_resources();
 #ifdef CONFIG_PCI_DOMAINS
 	dw_pci.domain++;
 #endif
@@ -573,7 +516,7 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
 			  PCIE_ATU_VIEWPORT);
 	dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
+	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
 			  PCIE_ATU_LIMIT);
 	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
 	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -589,7 +532,7 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
+	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
 			  PCIE_ATU_LIMIT);
 	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
 	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -604,10 +547,10 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
+	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
 			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+	dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+	dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
 			  PCIE_ATU_UPPER_TARGET);
 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
 }
@@ -620,10 +563,10 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
+	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
 			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+	dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
+	dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
 			  PCIE_ATU_UPPER_TARGET);
 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
 }
@@ -707,11 +650,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -736,11 +674,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -768,19 +701,17 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
 
 	pp = sys_to_pcie(sys);
 
-	if (!pp)
-		return 0;
-
-	if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
-		sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+	if (global_io_offset < SZ_1M && pp->io_size > 0) {
+		sys->io_offset = global_io_offset - pp->io_bus_addr;
 		pci_ioremap_io(global_io_offset, pp->io_base);
 		global_io_offset += SZ_64K;
 		pci_add_resource_offset(&sys->resources, &pp->io,
 					sys->io_offset);
 	}
 
-	sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+	sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
 	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+	pci_add_resource(&sys->resources, &pp->busn);
 
 	return 1;
 }
@@ -790,14 +721,16 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 	struct pci_bus *bus;
 	struct pcie_port *pp = sys_to_pcie(sys);
 
-	if (pp) {
-		pp->root_bus_nr = sys->busnr;
-		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
-					sys, &sys->resources);
-	} else {
-		bus = NULL;
-		BUG();
-	}
+	pp->root_bus_nr = sys->busnr;
+	bus = pci_create_root_bus(pp->dev, sys->busnr,
+				  &dw_pcie_ops, sys, &sys->resources);
+	if (!bus)
+		return NULL;
+
+	pci_scan_child_bus(bus);
+
+	if (bus && pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
 
 	return bus;
 }
@@ -833,7 +766,6 @@ static struct hw_pci dw_pci = {
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
 {
-	struct pcie_port_info *config = &pp->config;
 	u32 val;
 	u32 membase;
 	u32 memlimit;
@@ -888,7 +820,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 
 	/* setup memory base, memory limit */
 	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
-	memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
+	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
 	val = memlimit | membase;
 	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
 
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index daf81f922cda..c6256751daff 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -14,15 +14,6 @@
 #ifndef _PCIE_DESIGNWARE_H
 #define _PCIE_DESIGNWARE_H
 
-struct pcie_port_info {
-	u32		cfg0_size;
-	u32		cfg1_size;
-	u32		io_size;
-	u32		mem_size;
-	phys_addr_t	io_bus_addr;
-	phys_addr_t	mem_bus_addr;
-};
-
 /*
  * Maximum number of MSI IRQs can be 256 per controller. But keep
  * it 32 as of now. Probably we will never need more than 32. If needed,
@@ -38,17 +29,23 @@ struct pcie_port {
 	u64			cfg0_base;
 	u64			cfg0_mod_base;
 	void __iomem		*va_cfg0_base;
+	u32			cfg0_size;
 	u64			cfg1_base;
 	u64			cfg1_mod_base;
 	void __iomem		*va_cfg1_base;
+	u32			cfg1_size;
 	u64			io_base;
 	u64			io_mod_base;
+	phys_addr_t		io_bus_addr;
+	u32			io_size;
 	u64			mem_base;
 	u64			mem_mod_base;
+	phys_addr_t		mem_bus_addr;
+	u32			mem_size;
 	struct resource		cfg;
 	struct resource		io;
 	struct resource		mem;
-	struct pcie_port_info	config;
+	struct resource		busn;
 	int			irq;
 	u32			lanes;
 	struct pcie_host_ops	*ops;
@@ -73,7 +70,10 @@ struct pcie_host_ops {
 	void (*host_init)(struct pcie_port *pp);
 	void (*msi_set_irq)(struct pcie_port *pp, int irq);
 	void (*msi_clear_irq)(struct pcie_port *pp, int irq);
-	u32 (*get_msi_data)(struct pcie_port *pp);
+	u32 (*get_msi_addr)(struct pcie_port *pp);
+	u32 (*get_msi_data)(struct pcie_port *pp, int pos);
+	void (*scan_bus)(struct pcie_port *pp);
+	int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
 };
 
 int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4884ee5e07d4..61158e03ab5f 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -323,6 +323,7 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
 
 	/* Setup PCIe address space mappings for each resource */
 	resource_size_t size;
+	resource_size_t res_start;
 	u32 mask;
 
 	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -335,8 +336,13 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
 	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
 	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
 
-	rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
-	rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+	if (res->flags & IORESOURCE_IO)
+		res_start = pci_pio_to_address(res->start);
+	else
+		res_start = res->start;
+
+	rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPARH(win));
+	rcar_pci_write_reg(pcie, lower_32_bits(res_start), PCIEPARL(win));
 
 	/* First resource is for IO */
 	mask = PAR_ENABLE;
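pci_pio_to_address() comes from the host-bridge series in this merge: I/O resources now carry logical port numbers, and drivers translate them back to CPU physical addresses before programming address windows. The two helpers pair up roughly like this (addresses illustrative):

	phys_addr_t cpu_addr = 0xfe100000;	/* bridge I/O window */
	phys_addr_t phys;

	pci_register_io_range(cpu_addr, SZ_64K);	/* at parse time */
	phys = pci_pio_to_address(0x1000);		/* port -> CPU addr */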
@@ -363,9 +369,10 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
 
 		rcar_pcie_setup_window(i, pcie);
 
-		if (res->flags & IORESOURCE_IO)
-			pci_ioremap_io(nr * SZ_64K, res->start);
-		else
+		if (res->flags & IORESOURCE_IO) {
+			phys_addr_t io_start = pci_pio_to_address(res->start);
+			pci_ioremap_io(nr * SZ_64K, io_start);
+		} else
 			pci_add_resource(&sys->resources, res);
 	}
 	pci_add_resource(&sys->resources, &pcie->busn);
@@ -935,8 +942,10 @@ static int rcar_pcie_probe(struct platform_device *pdev)
935 } 942 }
936 943
937 for_each_of_pci_range(&parser, &range) { 944 for_each_of_pci_range(&parser, &range) {
938 of_pci_range_to_resource(&range, pdev->dev.of_node, 945 err = of_pci_range_to_resource(&range, pdev->dev.of_node,
939 &pcie->res[win++]); 946 &pcie->res[win++]);
947 if (err < 0)
948 return err;
940 949
941 if (win > RCAR_PCI_MAX_RESOURCES) 950 if (win > RCAR_PCI_MAX_RESOURCES)
942 break; 951 break;
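
Both hunks above follow the same rule introduced by the OF range rework: an
IORESOURCE_IO resource now carries a logical port-I/O cookie, not a CPU
physical address, so it must pass through pci_pio_to_address() before being
programmed into hardware. The pattern in isolation, as a minimal sketch:

	/* Sketch of the translation used above: I/O resources hold PIO
	 * cookies and need converting; memory resources are already
	 * CPU physical addresses. */
	static phys_addr_t window_phys_start(struct resource *res)
	{
		if (res->flags & IORESOURCE_IO)
			return pci_pio_to_address(res->start);
		return res->start;
	}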
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 6dea9e43a75c..85f594e1708f 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -340,7 +340,7 @@ static int __init spear13xx_pcie_probe(struct platform_device *pdev)
 
 	pp->dev = dev;
 
-	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
 	pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
 	if (IS_ERR(pp->dbi_base)) {
 		dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
new file mode 100644
index 000000000000..ccc496b33a97
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -0,0 +1,970 @@
+/*
+ * PCIe host controller driver for Xilinx AXI PCIe Bridge
+ *
+ * Copyright (c) 2012 - 2014 Xilinx, Inc.
+ *
+ * Based on the Tegra PCIe driver
+ *
+ * Bits taken from Synopsys Designware Host controller driver and
+ * ARM PCI Host generic driver.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+/* Register definitions */
+#define XILINX_PCIE_REG_BIR		0x00000130
+#define XILINX_PCIE_REG_IDR		0x00000138
+#define XILINX_PCIE_REG_IMR		0x0000013c
+#define XILINX_PCIE_REG_PSCR		0x00000144
+#define XILINX_PCIE_REG_RPSC		0x00000148
+#define XILINX_PCIE_REG_MSIBASE1	0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2	0x00000150
+#define XILINX_PCIE_REG_RPEFR		0x00000154
+#define XILINX_PCIE_REG_RPIFR1		0x00000158
+#define XILINX_PCIE_REG_RPIFR2		0x0000015c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN	BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR	BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR	BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET	BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT	BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE	BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL	BIT(10)
+#define XILINX_PCIE_INTR_FATAL		BIT(11)
+#define XILINX_PCIE_INTR_INTX		BIT(16)
+#define XILINX_PCIE_INTR_MSI		BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP	BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP	BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL	BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP	BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT	BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR	BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR	BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR	BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP	BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK	0x1FF30FED
+#define XILINX_PCIE_IDR_ALL_MASK	0xFFFFFFFF
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID	BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID	BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR	BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK	GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK	0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT	27
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK	GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT	16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA	GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT		20
+#define ECAM_DEV_NUM_SHIFT		12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS		128
+
+/* Number of Memory Resources */
+#define XILINX_MAX_NUM_RESOURCES	3
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @msi_pages: MSI pages
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @irq_domain: IRQ domain pointer
+ * @bus_range: Bus range
+ * @resources: Bus Resources
+ */
+struct xilinx_pcie_port {
+	void __iomem *reg_base;
+	u32 irq;
+	unsigned long msi_pages;
+	u8 root_busno;
+	struct device *dev;
+	struct irq_domain *irq_domain;
+	struct resource bus_range;
+	struct list_head resources;
+};
+
+static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+
+static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+	return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+	writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+	return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+		XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+	u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+		dev_dbg(port->dev, "Requester ID %d\n",
+			val & XILINX_PCIE_RPEFR_REQ_ID);
+		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+			   XILINX_PCIE_REG_RPEFR);
+	}
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' on success and 'false' if invalid device is found
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+	/* Check if link is up when trying to access downstream ports */
+	if (bus->number != port->root_busno)
+		if (!xilinx_pcie_link_is_up(port))
+			return false;
+
+	/* Only one device down on each root port */
+	if (bus->number == port->root_busno && devfn > 0)
+		return false;
+
+	/*
+	 * Do not read more than one device on the bus directly attached
+	 * to RC.
+	 */
+	if (bus->primary == port->root_busno && devfn > 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * xilinx_pcie_config_base - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ *	   accessed.
+ */
+static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
+					     unsigned int devfn, int where)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+	int relbus;
+
+	relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+		 (devfn << ECAM_DEV_NUM_SHIFT);
+
+	return port->reg_base + relbus + where;
+}
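
The bridge maps configuration space as a flat ECAM window behind reg_base:
offset bits [27:20] select the bus, bits [19:12] the device/function, and the
low bits the register. A worked example (editorial, not part of the patch):

	/* bus 1, devfn 0x08 (device 1, function 0), register 0x10 */
	unsigned int off = (1 << ECAM_BUS_NUM_SHIFT) |
			   (0x08 << ECAM_DEV_NUM_SHIFT) | 0x10;
	/* off == 0x108010, so the access hits reg_base + 0x108010 */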
+
+/**
+ * xilinx_pcie_read_config - Read configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be read
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ *	   PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 *val)
+{
+	void __iomem *addr;
+
+	if (!xilinx_pcie_valid_device(bus, devfn)) {
+		*val = 0xFFFFFFFF;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	addr = xilinx_pcie_config_base(bus, devfn, where);
+
+	switch (size) {
+	case 1:
+		*val = readb(addr);
+		break;
+	case 2:
+		*val = readw(addr);
+		break;
+	default:
+		*val = readl(addr);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * xilinx_pcie_write_config - Write configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be written to device
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ *	   PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 val)
+{
+	void __iomem *addr;
+
+	if (!xilinx_pcie_valid_device(bus, devfn))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = xilinx_pcie_config_base(bus, devfn, where);
+
+	switch (size) {
+	case 1:
+		writeb(val, addr);
+		break;
+	case 2:
+		writew(val, addr);
+		break;
+	default:
+		writel(val, addr);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+	.read = xilinx_pcie_read_config,
+	.write = xilinx_pcie_write_config,
+};
+
+/* MSI functions */
+
+/**
+ * xilinx_pcie_destroy_msi - Free MSI number
+ * @irq: IRQ to be freed
+ */
+static void xilinx_pcie_destroy_msi(unsigned int irq)
+{
+	struct irq_desc *desc;
+	struct msi_desc *msi;
+	struct xilinx_pcie_port *port;
+
+	desc = irq_to_desc(irq);
+	msi = irq_desc_get_msi_desc(desc);
+	port = sys_to_pcie(msi->dev->bus->sysdata);
+
+	if (!test_bit(irq, msi_irq_in_use))
+		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
+	else
+		clear_bit(irq, msi_irq_in_use);
+}
+
+/**
+ * xilinx_pcie_assign_msi - Allocate MSI number
+ * @port: PCIe port structure
+ *
+ * Return: A valid IRQ on success and error value on failure.
+ */
+static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+{
+	int pos;
+
+	pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+	if (pos < XILINX_NUM_MSI_IRQS)
+		set_bit(pos, msi_irq_in_use);
+	else
+		return -ENOSPC;
+
+	return pos;
+}
+
+/**
+ * xilinx_msi_teardown_irq - Destroy the MSI
+ * @chip: MSI Chip descriptor
+ * @irq: MSI IRQ to destroy
+ */
+static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+	xilinx_pcie_destroy_msi(irq);
+}
+
+/**
+ * xilinx_pcie_msi_setup_irq - Setup MSI request
+ * @chip: MSI chip pointer
+ * @pdev: PCIe device pointer
+ * @desc: MSI descriptor pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
+				     struct pci_dev *pdev,
+				     struct msi_desc *desc)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
+	unsigned int irq;
+	int hwirq;
+	struct msi_msg msg;
+	phys_addr_t msg_addr;
+
+	hwirq = xilinx_pcie_assign_msi(port);
+	if (hwirq < 0)
+		return hwirq;
+
+	irq = irq_create_mapping(port->irq_domain, hwirq);
+	if (!irq)
+		return -EINVAL;
+
+	irq_set_msi_desc(irq, desc);
+
+	msg_addr = virt_to_phys((void *)port->msi_pages);
+
+	msg.address_hi = 0;
+	msg.address_lo = msg_addr;
+	msg.data = irq;
+
+	write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+/* MSI Chip Descriptor */
+static struct msi_chip xilinx_pcie_msi_chip = {
+	.setup_irq = xilinx_pcie_msi_setup_irq,
+	.teardown_irq = xilinx_msi_teardown_irq,
+};
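
These msi_chip hooks stay invisible to endpoint drivers, which keep using the
ordinary PCI core calls. An illustrative consumer against the 3.18-era API
(the foo_* names are hypothetical):

	err = pci_enable_msi(pdev);
	if (err)
		dev_warn(&pdev->dev, "no MSI, falling back to INTx\n");
	else
		err = request_irq(pdev->irq, foo_irq_handler, 0,
				  "foo", foo_priv);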
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip xilinx_msi_irq_chip = {
+	.name = "Xilinx PCIe MSI",
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
+/**
+ * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			       irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+/* IRQ Domain operations */
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = xilinx_pcie_msi_map,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+	phys_addr_t msg_addr;
+
+	port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+	msg_addr = virt_to_phys((void *)port->msi_pages);
+	pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
+	pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+}
+
+/**
+ * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
+ * @bus: PCIe bus
+ */
+static void xilinx_pcie_add_bus(struct pci_bus *bus)
+{
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+		xilinx_pcie_msi_chip.dev = port->dev;
+		bus->msi = &xilinx_pcie_msi_chip;
+	}
+}
+
+/* INTx Functions */
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = xilinx_pcie_intx_map,
+};
+
+/* PCIe HW Functions */
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+	u32 val, mask, status, msi_data;
+
+	/* Read interrupt decode and mask registers */
+	val = pcie_read(port, XILINX_PCIE_REG_IDR);
+	mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+	status = val & mask;
+	if (!status)
+		return IRQ_NONE;
+
+	if (status & XILINX_PCIE_INTR_LINK_DOWN)
+		dev_warn(port->dev, "Link Down\n");
+
+	if (status & XILINX_PCIE_INTR_ECRC_ERR)
+		dev_warn(port->dev, "ECRC failed\n");
+
+	if (status & XILINX_PCIE_INTR_STR_ERR)
+		dev_warn(port->dev, "Streaming error\n");
+
+	if (status & XILINX_PCIE_INTR_HOT_RESET)
+		dev_info(port->dev, "Hot reset\n");
+
+	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+		dev_warn(port->dev, "ECAM access timeout\n");
+
+	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+		dev_warn(port->dev, "Correctable error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_NONFATAL) {
+		dev_warn(port->dev, "Non fatal error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_FATAL) {
+		dev_warn(port->dev, "Fatal error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_INTX) {
+		/* INTx interrupt received */
+		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+		/* Check whether interrupt valid */
+		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			return IRQ_HANDLED;
+		}
+
+		/* Clear interrupt FIFO register 1 */
+		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+			   XILINX_PCIE_REG_RPIFR1);
+
+		/* Handle INTx Interrupt */
+		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+		       XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+	}
+
+	if (status & XILINX_PCIE_INTR_MSI) {
+		/* MSI Interrupt */
+		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			return IRQ_HANDLED;
+		}
+
+		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+			msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+				   XILINX_PCIE_RPIFR2_MSG_DATA;
+
+			/* Clear interrupt FIFO register 1 */
+			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+				   XILINX_PCIE_REG_RPIFR1);
+
+			if (IS_ENABLED(CONFIG_PCI_MSI)) {
+				/* Handle MSI Interrupt */
+				generic_handle_irq(msi_data);
+			}
+		}
+	}
+
+	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+		dev_warn(port->dev, "Slave unsupported request\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+		dev_warn(port->dev, "Slave unexpected completion\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_COMPL)
+		dev_warn(port->dev, "Slave completion timeout\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_ERRP)
+		dev_warn(port->dev, "Slave Error Poison\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+		dev_warn(port->dev, "Slave Completer Abort\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+		dev_warn(port->dev, "Slave Illegal Burst\n");
+
+	if (status & XILINX_PCIE_INTR_MST_DECERR)
+		dev_warn(port->dev, "Master decode error\n");
+
+	if (status & XILINX_PCIE_INTR_MST_SLVERR)
+		dev_warn(port->dev, "Master slave error\n");
+
+	if (status & XILINX_PCIE_INTR_MST_ERRP)
+		dev_warn(port->dev, "Master error poison\n");
+
+	/* Clear the Interrupt Decode register */
+	pcie_write(port, status, XILINX_PCIE_REG_IDR);
+
+	return IRQ_HANDLED;
+}
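
In the INTx branch above, RPIFR1 bits [28:27] hold the index of the asserted
legacy line, and adding one maps INTA through INTD onto hwirqs 1 through 4 of
the INTx domain. A worked decode (editorial sketch):

	/* A valid INTB message: valid bit set, line index 1. */
	u32 fifo = XILINX_PCIE_RPIFR1_INTR_VALID |
		   (1 << XILINX_PCIE_RPIFR1_INTR_SHIFT);
	u32 line = ((fifo & XILINX_PCIE_RPIFR1_INTR_MASK) >>
		    XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;	/* 2 == INTB */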
+
+/**
+ * xilinx_pcie_free_irq_domain - Free IRQ domain
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
+{
+	int i;
+	u32 irq, num_irqs;
+
+	/* Free IRQ Domain */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+
+		free_pages(port->msi_pages, 0);
+
+		num_irqs = XILINX_NUM_MSI_IRQS;
+	} else {
+		/* INTx */
+		num_irqs = 4;
+	}
+
+	for (i = 0; i < num_irqs; i++) {
+		irq = irq_find_mapping(port->irq_domain, i);
+		if (irq > 0)
+			irq_dispose_mapping(irq);
+	}
+
+	irq_domain_remove(port->irq_domain);
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node;
+
+	/* Setup INTx */
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+						 &intx_domain_ops,
+						 port);
+	if (!port->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	/* Setup MSI */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		port->irq_domain = irq_domain_add_linear(node,
+							 XILINX_NUM_MSI_IRQS,
+							 &msi_domain_ops,
+							 &xilinx_pcie_msi_chip);
+		if (!port->irq_domain) {
+			dev_err(dev, "Failed to get an MSI IRQ domain\n");
+			return -ENOMEM;
+		}
+
+		xilinx_pcie_enable_msi(port);
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+	if (xilinx_pcie_link_is_up(port))
+		dev_info(port->dev, "PCIe Link is UP\n");
+	else
+		dev_info(port->dev, "PCIe Link is DOWN\n");
+
+	/* Disable all interrupts */
+	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+		   XILINX_PCIE_REG_IMR);
+
+	/* Clear pending interrupts */
+	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+			 XILINX_PCIE_IMR_ALL_MASK,
+		   XILINX_PCIE_REG_IDR);
+
+	/* Enable all interrupts */
+	pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
+
+	/* Enable the Bridge enable bit */
+	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+			 XILINX_PCIE_REG_RPSC_BEN,
+		   XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_setup - Setup memory resources
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: '1' on success and error value on failure
+ */
+static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(sys);
+
+	list_splice_init(&port->resources, &sys->resources);
+
+	return 1;
+}
+
+/**
+ * xilinx_pcie_scan_bus - Scan PCIe bus for devices
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: Valid Bus pointer on success and NULL on failure
+ */
+static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(sys);
+	struct pci_bus *bus;
+
+	port->root_busno = sys->busnr;
+	bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
+				sys, &sys->resources);
+
+	return bus;
+}
+
+/**
+ * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *mem;
+	resource_size_t offset;
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	struct pci_host_bridge_window *win;
+	int err = 0, mem_resno = 0;
+
+	/* Get the ranges */
+	if (of_pci_range_parser_init(&parser, node)) {
+		dev_err(dev, "missing \"ranges\" property\n");
+		return -EINVAL;
+	}
+
+	/* Parse the ranges and add the resources found to the list */
+	for_each_of_pci_range(&parser, &range) {
+
+		if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
+			dev_err(dev, "Maximum memory resources exceeded\n");
+			return -EINVAL;
+		}
+
+		mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
+		if (!mem) {
+			err = -ENOMEM;
+			goto free_resources;
+		}
+
+		of_pci_range_to_resource(&range, node, mem);
+
+		switch (mem->flags & IORESOURCE_TYPE_BITS) {
+		case IORESOURCE_MEM:
+			offset = range.cpu_addr - range.pci_addr;
+			mem_resno++;
+			break;
+		default:
+			err = -EINVAL;
+			break;
+		}
+
+		if (err < 0) {
+			dev_warn(dev, "Invalid resource found %pR\n", mem);
+			continue;
+		}
+
+		err = request_resource(&iomem_resource, mem);
+		if (err)
+			goto free_resources;
+
+		pci_add_resource_offset(&port->resources, mem, offset);
+	}
+
+	/* Get the bus range */
+	if (of_pci_parse_bus_range(node, &port->bus_range)) {
+		u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
+		u8 last;
+
+		last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
+		       XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
+
+		port->bus_range = (struct resource) {
+			.name = node->name,
+			.start = 0,
+			.end = last,
+			.flags = IORESOURCE_BUS,
+		};
+	}
+
+	/* Register bus resource */
+	pci_add_resource(&port->resources, &port->bus_range);
+
+	return 0;
+
+free_resources:
+	release_child_resources(&iomem_resource);
+	list_for_each_entry(win, &port->resources, list)
+		devm_kfree(dev, win->res);
+	pci_free_resource_list(&port->resources);
+
+	return err;
+}
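
pci_add_resource_offset() above records the CPU-to-bus translation for each
window: the offset is the difference between the CPU view and the bus view of
the same range. A small worked example with hypothetical addresses:

	/* Hypothetical ranges entry: CPU 0xa0000000 maps to PCI
	 * 0x80000000. The stored offset converts between the views. */
	resource_size_t cpu_addr = 0xa0000000;
	resource_size_t pci_addr = 0x80000000;
	resource_size_t offset = cpu_addr - pci_addr;	/* 0x20000000 */
	/* bus address = CPU address - offset,
	 * e.g. 0xa0001000 - 0x20000000 == 0x80001000 on the link. */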
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct resource regs;
+	const char *type;
+	int err;
+
+	type = of_get_property(node, "device_type", NULL);
+	if (!type || strcmp(type, "pci")) {
+		dev_err(dev, "invalid \"device_type\" %s\n", type);
+		return -EINVAL;
+	}
+
+	err = of_address_to_resource(node, 0, &regs);
+	if (err) {
+		dev_err(dev, "missing \"reg\" property\n");
+		return err;
+	}
+
+	port->reg_base = devm_ioremap_resource(dev, &regs);
+	if (IS_ERR(port->reg_base))
+		return PTR_ERR(port->reg_base);
+
+	port->irq = irq_of_parse_and_map(node, 0);
+	err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+			       IRQF_SHARED, "xilinx-pcie", port);
+	if (err) {
+		dev_err(dev, "unable to request irq %d\n", port->irq);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+	struct xilinx_pcie_port *port;
+	struct hw_pci hw;
+	struct device *dev = &pdev->dev;
+	int err;
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->dev = dev;
+
+	err = xilinx_pcie_parse_dt(port);
+	if (err) {
+		dev_err(dev, "Parsing DT failed\n");
+		return err;
+	}
+
+	xilinx_pcie_init_port(port);
+
+	err = xilinx_pcie_init_irq_domain(port);
+	if (err) {
+		dev_err(dev, "Failed creating IRQ Domain\n");
+		return err;
+	}
+
+	/*
+	 * Parse PCI ranges, configuration bus range and
+	 * request their resources
+	 */
+	INIT_LIST_HEAD(&port->resources);
+	err = xilinx_pcie_parse_and_add_res(port);
+	if (err) {
+		dev_err(dev, "Failed adding resources\n");
+		return err;
+	}
+
+	platform_set_drvdata(pdev, port);
+
+	/* Register the device */
+	memset(&hw, 0, sizeof(hw));
+	hw = (struct hw_pci) {
+		.nr_controllers = 1,
+		.private_data = (void **)&port,
+		.setup = xilinx_pcie_setup,
+		.map_irq = of_irq_parse_and_map_pci,
+		.add_bus = xilinx_pcie_add_bus,
+		.scan = xilinx_pcie_scan_bus,
+		.ops = &xilinx_pcie_ops,
+	};
+	pci_common_init_dev(dev, &hw);
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_remove - Remove function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' always
+ */
+static int xilinx_pcie_remove(struct platform_device *pdev)
+{
+	struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
+
+	xilinx_pcie_free_irq_domain(port);
+
+	return 0;
+}
+
+static struct of_device_id xilinx_pcie_of_match[] = {
+	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
+	{}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+	.driver = {
+		.name = "xilinx-pcie",
+		.owner = THIS_MODULE,
+		.of_match_table = xilinx_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = xilinx_pcie_probe,
+	.remove = xilinx_pcie_remove,
+};
+module_platform_driver(xilinx_pcie_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
+MODULE_LICENSE("GPL v2");