Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/host/Kconfig                 |  19
-rw-r--r--  drivers/pci/host/Makefile                |   2
-rw-r--r--  drivers/pci/host/pci-imx6.c              |  51
-rw-r--r--  drivers/pci/host/pci-keystone-dw.c       | 516
-rw-r--r--  drivers/pci/host/pci-keystone.c          | 415
-rw-r--r--  drivers/pci/host/pci-keystone.h          |  58
-rw-r--r--  drivers/pci/host/pci-mvebu.c             |   6
-rw-r--r--  drivers/pci/host/pci-tegra.c             | 269
-rw-r--r--  drivers/pci/host/pcie-designware.c       | 170
-rw-r--r--  drivers/pci/host/pcie-designware.h       |  22
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c        |   2
-rw-r--r--  drivers/pci/host/pcie-xilinx.c           | 970
-rw-r--r--  drivers/pci/hotplug/Makefile             |   2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c         | 254
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c       |  11
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c        |   2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c  |  13
-rw-r--r--  drivers/pci/hotplug/cpcihp_generic.c     |  28
-rw-r--r--  drivers/pci/hotplug/cpcihp_zt5550.c      |  44
-rw-r--r--  drivers/pci/hotplug/cpqphp.h             |   2
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c        |   3
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c        |  19
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.c       |  13
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c        |  19
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c        |   3
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c         |   3
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c         |   6
-rw-r--r--  drivers/pci/hotplug/ibmphp_res.c         |  45
-rw-r--r--  drivers/pci/hotplug/pciehp.h             |   2
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c        |   7
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c         |  17
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c         |   9
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c         | 180
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c        |  14
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c         |   5
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c         |   8
-rw-r--r--  drivers/pci/iov.c                        |   2
-rw-r--r--  drivers/pci/pci-acpi.c                   | 276
-rw-r--r--  drivers/pci/pci-driver.c                 |   5
-rw-r--r--  drivers/pci/pci-sysfs.c                  |   2
-rw-r--r--  drivers/pci/pci.c                        |  17
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c   |  11
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c           |  74
-rw-r--r--  drivers/pci/probe.c                      | 156
-rw-r--r--  drivers/pci/quirks.c                     | 112
-rw-r--r--  drivers/pci/search.c                     |  34
46 files changed, 3000 insertions, 898 deletions
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 8922c376456a..34134d64f35a 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -63,4 +63,23 @@ config PCIE_SPEAR13XX
 	help
 	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
 
+
+config PCI_KEYSTONE
+	bool "TI Keystone PCIe controller"
+	depends on ARCH_KEYSTONE
+	select PCIE_DW
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCI controller support on Keystone
+	  SoCs. The PCI controller on Keystone is based on Designware hardware
+	  and therefore the driver re-uses the Designware core functions to
+	  implement the driver.
+
+config PCIE_XILINX
+	bool "Xilinx AXI PCIe host bridge support"
+	depends on ARCH_ZYNQ
+	help
+	  Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
+	  Host Bridge driver.
+
 endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d0e88f114ff9..182929cdbcd9 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index a568efaa331c..233fe8a88264 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -49,6 +49,9 @@ struct imx6_pcie {
 
 /* PCIe Port Logic registers (memory-mapped) */
 #define PL_OFFSET		0x700
+#define PCIE_PL_PFLR		(PL_OFFSET + 0x08)
+#define PCIE_PL_PFLR_LINK_STATE_MASK	(0x3f << 16)
+#define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
 #define PCIE_PHY_DEBUG_R0	(PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1	(PL_OFFSET + 0x2c)
 #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
@@ -214,6 +217,32 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
 {
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	u32 val, gpr1, gpr12;
+
+	/*
+	 * If the bootloader already enabled the link we need some special
+	 * handling to get the core back into a state where it is safe to
+	 * touch it for configuration. As there is no dedicated reset signal
+	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
+	 * state before completely disabling LTSSM, which is a prerequisite
+	 * for core configuration.
+	 *
+	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
+	 * indication that the bootloader activated the link.
+	 */
+	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+	if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+	    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+		val = readl(pp->dbi_base + PCIE_PL_PFLR);
+		val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+		val |= PCIE_PL_PFLR_FORCE_LINK;
+		writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+	}
 
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 			   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
@@ -228,11 +257,6 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
 	int ret;
 
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
-
 	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 	if (ret) {
 		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
@@ -254,6 +278,12 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 	/* allow the clocks to stabilize */
 	usleep_range(200, 500);
 
+	/* power up core phy and enable ref clock */
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
 	/* Some boards don't have PCIe reset GPIO. */
 	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
 		gpio_set_value(imx6_pcie->reset_gpio, 0);
@@ -589,6 +619,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 	return 0;
 }
 
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+	/* bring down link, so bootloader gets clean state in case of reboot */
+	imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+}
+
 static const struct of_device_id imx6_pcie_of_match[] = {
 	{ .compatible = "fsl,imx6q-pcie", },
 	{},
@@ -601,6 +639,7 @@ static struct platform_driver imx6_pcie_driver = {
 		.owner = THIS_MODULE,
 		.of_match_table = imx6_pcie_of_match,
 	},
+	.shutdown = imx6_pcie_shutdown,
 };
 
 /* Freescale PCIe driver does not allow module unload */
@@ -609,7 +648,7 @@ static int __init imx6_pcie_init(void)
 {
 	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
 }
-fs_initcall(imx6_pcie_init);
+module_init(imx6_pcie_init);
 
 MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
 MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
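
Every GPR poke in the i.MX6 patch above goes through regmap_update_bits(), which performs a locked read-modify-write: only the bits set in the mask argument can change, and the value is masked before being merged. That is why writing 0 << 10 against the IMX6Q_GPR12_PCIE_CTL_2 mask gates the LTSSM off without disturbing the rest of GPR12. A minimal sketch of the idiom with a hypothetical helper (the GPR12 offset of 0x30 is an assumption here, not taken from the patch):

#include <linux/regmap.h>

/* Hypothetical helper: gate the LTSSM via IOMUXC GPR12 bit 10.
 * regmap_update_bits(map, reg, mask, val) reads the register, clears
 * the masked bits, ORs in (val & mask) and writes back under the
 * regmap lock, so unrelated GPR12 bits are preserved.
 */
static int imx6_ltssm_gate(struct regmap *gpr, bool enable)
{
	unsigned int mask = 1 << 10;	/* IMX6Q_GPR12_PCIE_CTL_2 */

	return regmap_update_bits(gpr, 0x30, mask, enable ? mask : 0);
}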
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
new file mode 100644
index 000000000000..34086ce88e8e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -0,0 +1,516 @@
+/*
+ * Designware application register space functions for Keystone PCI controller
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+/* Application register defines */
+#define LTSSM_EN_VAL		1
+#define LTSSM_STATE_MASK	0x1f
+#define LTSSM_STATE_L0		0x11
+#define DBI_CS2_EN_VAL		0x20
+#define OB_XLAT_EN_VAL		2
+
+/* Application registers */
+#define CMD_STATUS		0x004
+#define CFG_SETUP		0x008
+#define OB_SIZE			0x030
+#define CFG_PCIM_WIN_SZ_IDX	3
+#define CFG_PCIM_WIN_CNT	32
+#define SPACE0_REMOTE_CFG_OFFSET	0x1000
+#define OB_OFFSET_INDEX(n)	(0x200 + (8 * n))
+#define OB_OFFSET_HI(n)		(0x204 + (8 * n))
+
+/* IRQ register defines */
+#define IRQ_EOI			0x050
+#define IRQ_STATUS		0x184
+#define IRQ_ENABLE_SET		0x188
+#define IRQ_ENABLE_CLR		0x18c
+
+#define MSI_IRQ			0x054
+#define MSI0_IRQ_STATUS		0x104
+#define MSI0_IRQ_ENABLE_SET	0x108
+#define MSI0_IRQ_ENABLE_CLR	0x10c
+#define IRQ_STATUS		0x184
+#define MSI_IRQ_OFFSET		4
+
+/* Config space registers */
+#define DEBUG0			0x728
+
+#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+					     u32 *bit_pos)
+{
+	*reg_offset = offset % 8;
+	*bit_pos = offset >> 3;
+}
+
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	return ks_pcie->app.start + MSI_IRQ;
+}
+
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 pending, vector;
+	int src, virq;
+
+	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+
+	/*
+	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+	 * shows 1, 9, 17, 25 and so forth
+	 */
+	for (src = 0; src < 4; src++) {
+		if (BIT(src) & pending) {
+			vector = offset + (src << 3);
+			virq = irq_linear_revmap(pp->irq_domain, vector);
+			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+				src, vector, virq);
+			generic_handle_irq(virq);
+		}
+	}
+}
+
+static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
+{
+	u32 offset, reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
+
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
+	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+	u32 reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+}
+
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+	u32 reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+}
+
+static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
+{
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	u32 offset;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Mask the end point if PVM implemented */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (msi->msi_attrib.maskbit)
+			mask_msi_irq(d);
+	}
+
+	ks_dw_pcie_msi_clear_irq(pp, offset);
+}
+
+static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
+{
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	u32 offset;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Mask the end point if PVM implemented */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (msi->msi_attrib.maskbit)
+			unmask_msi_irq(d);
+	}
+
+	ks_dw_pcie_msi_set_irq(pp, offset);
+}
+
+static struct irq_chip ks_dw_pcie_msi_irq_chip = {
+	.name = "Keystone-PCIe-MSI-IRQ",
+	.irq_ack = ks_dw_pcie_msi_irq_ack,
+	.irq_mask = ks_dw_pcie_msi_irq_mask,
+	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
+};
+
+static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			      irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
+	.map = ks_dw_pcie_msi_map,
+};
+
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	int i;
+
+	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
+					       MAX_MSI_IRQS,
+					       &ks_dw_pcie_msi_domain_ops,
+					       chip);
+	if (!pp->irq_domain) {
+		dev_err(pp->dev, "irq domain init failed\n");
+		return -ENXIO;
+	}
+
+	for (i = 0; i < MAX_MSI_IRQS; i++)
+		irq_create_mapping(pp->irq_domain, i);
+
+	return 0;
+}
+
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+	int i;
+
+	for (i = 0; i < MAX_LEGACY_IRQS; i++)
+		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+}
+
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 pending;
+	int virq;
+
+	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+
+	if (BIT(0) & pending) {
+		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
+			virq);
+		generic_handle_irq(virq);
+	}
+
+	/* EOI the INTx interrupt */
+	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
+	.name = "Keystone-PCI-Legacy-IRQ",
+	.irq_ack = ks_dw_pcie_ack_legacy_irq,
+	.irq_mask = ks_dw_pcie_mask_legacy_irq,
+	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
+};
+
+static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
+		unsigned int irq, irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
+	.map = ks_dw_pcie_init_legacy_irq_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+{
+	u32 val;
+
+	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
+	       reg_virt + CMD_STATUS);
+
+	do {
+		val = readl(reg_virt + CMD_STATUS);
+	} while (!(val & DBI_CS2_EN_VAL));
+}
+
+/**
+ * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+{
+	u32 val;
+
+	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
+	       reg_virt + CMD_STATUS);
+
+	do {
+		val = readl(reg_virt + CMD_STATUS);
+	} while (val & DBI_CS2_EN_VAL);
+}
+
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 start = pp->mem.start, end = pp->mem.end;
+	int i, tr_size;
+
+	/* Disable BARs for inbound access */
+	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
+	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+	/* Set outbound translation size per window division */
+	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+
+	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
+
+	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
+	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
+		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
+		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+		start += tr_size;
+	}
+
+	/* Enable OB translation */
+	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
+	       ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_pcie_cfg_setup() - Set up configuration space address for a device
+ *
+ * @ks_pcie: ptr to keystone_pcie structure
+ * @bus: Bus number the device is residing on
+ * @devfn: device, function number info
+ *
+ * Forms and returns the address of configuration space mapped in PCIESS
+ * address space 0. Also configures CFG_SETUP for remote configuration space
+ * access.
+ *
+ * The address space has two regions to access configuration - local and remote.
+ * We access local region for bus 0 (as RC is attached on bus 0) and remote
+ * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
+ * we will do TYPE 0 access as it will be on our secondary bus (logical).
+ * CFG_SETUP is needed only for remote configuration access.
+ */
+static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
+				       unsigned int devfn)
+{
+	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 regval;
+
+	if (bus == 0)
+		return pp->dbi_base;
+
+	regval = (bus << 16) | (device << 8) | function;
+
+	/*
+	 * Since Bus#1 will be a virtual bus, we need to have TYPE0
+	 * access only.
+	 * TYPE 1
+	 */
+	if (bus != 1)
+		regval |= BIT(24);
+
+	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+	return pp->va_cfg0_base;
+}
+
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 *val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u8 bus_num = bus->number;
+	void __iomem *addr;
+
+	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+	return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+}
+
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u8 bus_num = bus->number;
+	void __iomem *addr;
+
+	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+	return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+}
+
+/**
+ * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	/* Configure and set up BAR0 */
+	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+
+	/* Enable BAR0 */
+	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+
+	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+	/*
+	 * For BAR0, just setting bus address for inbound writes (MSI) should
+	 * be sufficient. Use physical address to avoid any conflicts.
+	 */
+	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+}
+
+/**
+ * ks_dw_pcie_link_up() - Check if link up
+ */
+int ks_dw_pcie_link_up(struct pcie_port *pp)
+{
+	u32 val = readl(pp->dbi_base + DEBUG0);
+
+	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
+}
+
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	/* Disable Link training */
+	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	val &= ~LTSSM_EN_VAL;
+	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+
+	/* Initiate Link Training */
+	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize legacy irq domain
+ * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
+ * PCI host controller.
+ */
+int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+				struct device_node *msi_intc_np)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	struct platform_device *pdev = to_platform_device(pp->dev);
+	struct resource *res;
+
+	/* Index 0 is the config reg. space address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
+
+	/*
+	 * We set these same and is used in pcie rd/wr_other_conf
+	 * functions
+	 */
+	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+	pp->va_cfg1_base = pp->va_cfg0_base;
+
+	/* Index 1 is the application reg. space address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	ks_pcie->app = *res;
+	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+	if (IS_ERR(ks_pcie->va_app_base))
+		return PTR_ERR(ks_pcie->va_app_base);
+
+	/* Create legacy IRQ domain */
+	ks_pcie->legacy_irq_domain =
+			irq_domain_add_linear(ks_pcie->legacy_intc_np,
+					      MAX_LEGACY_IRQS,
+					      &ks_dw_pcie_legacy_irq_domain_ops,
+					      NULL);
+	if (!ks_pcie->legacy_irq_domain) {
+		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+		return -EINVAL;
+	}
+
+	return dw_pcie_host_init(pp);
+}
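
The MSI handling in this file all derives from one mapping: with eight host interrupt lines and 32 vectors, vector v is reported as bit (v >> 3) of status bank (v % 8). That is what update_reg_offset_bit_pos() computes, and ks_dw_pcie_handle_msi_irq() inverts it with vector = offset + (src << 3). A standalone sketch of the round trip (plain C, hypothetical names):

#include <assert.h>

/* Mirror of update_reg_offset_bit_pos(): MSI vector -> (bank, bit). */
static void vector_to_bank_bit(unsigned int vector, unsigned int *bank,
			       unsigned int *bit)
{
	*bank = vector % 8;	/* which MSIn_IRQ_STATUS bank (0-7) */
	*bit = vector >> 3;	/* which status bit in that bank (0-3) */
}

int main(void)
{
	unsigned int bank, bit;

	/* Vector 17 lands in bank 1 (MSI1), bit 2 -- matching the comment
	 * above that MSI1 status bits show vectors 1, 9, 17, 25. */
	vector_to_bank_bit(17, &bank, &bit);
	assert(bank == 1 && bit == 2);

	/* The inverse, as computed in ks_dw_pcie_handle_msi_irq(). */
	assert(bank + (bit << 3) == 17);
	return 0;
}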
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
new file mode 100644
index 000000000000..1b893bc8b842
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.c
@@ -0,0 +1,415 @@
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+#define DRIVER_NAME	"keystone-pcie"
+
+/* driver specific constants */
+#define MAX_MSI_HOST_IRQS	8
+#define MAX_LEGACY_HOST_IRQS	4
+
+/* DEV_STAT_CTRL */
+#define PCIE_CAP_BASE		0x70
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK		0xb008
+#define PCIE_RC_K2E		0xb009
+#define PCIE_RC_K2L		0xb00a
+
+#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)
+
+static void quirk_limit_mrrs(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct pci_dev *bridge = bus->self;
+	static const struct pci_device_id rc_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+
+	if (pci_is_root_bus(bus))
+		return;
+
+	/* look for the host bridge */
+	while (!pci_is_root_bus(bus)) {
+		bridge = bus->self;
+		bus = bus->parent;
+	}
+
+	if (bridge) {
+		/*
+		 * Keystone PCI controller has a h/w limitation of
+		 * 256 bytes maximum read request size. It can't handle
+		 * anything higher than this. So force this limit on
+		 * all downstream devices.
+		 */
+		if (pci_match_id(rc_pci_devids, bridge)) {
+			if (pcie_get_readrq(dev) > 256) {
+				dev_info(&dev->dev, "limiting MRRS to 256\n");
+				pcie_set_readrq(dev, 256);
+			}
+		}
+	}
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+
+static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	int count = 200;
+
+	dw_pcie_setup_rc(pp);
+
+	if (dw_pcie_link_up(pp)) {
+		dev_err(pp->dev, "Link already up\n");
+		return 0;
+	}
+
+	ks_dw_pcie_initiate_link_train(ks_pcie);
+	/* check if the link is up or not */
+	while (!dw_pcie_link_up(pp)) {
+		usleep_range(100, 1000);
+		if (--count) {
+			ks_dw_pcie_initiate_link_train(ks_pcie);
+			continue;
+		}
+		dev_err(pp->dev, "phy link never came up\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	u32 offset = irq - ks_pcie->msi_host_irqs[0];
+	struct pcie_port *pp = &ks_pcie->pp;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation would have replaced normal
+	 * interrupt driver handler so we need to take care of mask/unmask and
+	 * ack operation.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+	chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @irq: IRQ line for legacy interrupts
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * takes care of interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation would have replaced normal
+	 * interrupt driver handler so we need to take care of mask/unmask and
+	 * ack operation.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+	chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+					   char *controller, int *num_irqs)
+{
+	int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+	struct device *dev = ks_pcie->pp.dev;
+	struct device_node *np_pcie = dev->of_node, **np_temp;
+
+	if (!strcmp(controller, "msi-interrupt-controller"))
+		legacy = 0;
+
+	if (legacy) {
+		np_temp = &ks_pcie->legacy_intc_np;
+		max_host_irqs = MAX_LEGACY_HOST_IRQS;
+		host_irqs = &ks_pcie->legacy_host_irqs[0];
+	} else {
+		np_temp = &ks_pcie->msi_intc_np;
+		max_host_irqs = MAX_MSI_HOST_IRQS;
+		host_irqs = &ks_pcie->msi_host_irqs[0];
+	}
+
+	/* interrupt controller is in a child node */
+	*np_temp = of_find_node_by_name(np_pcie, controller);
+	if (!(*np_temp)) {
+		dev_err(dev, "Node for %s is absent\n", controller);
+		goto out;
+	}
+	temp = of_irq_count(*np_temp);
+	if (!temp)
+		goto out;
+	if (temp > max_host_irqs)
+		dev_warn(dev, "Too many %s interrupts defined %u\n",
+			 (legacy ? "legacy" : "MSI"), temp);
+
+	/*
+	 * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
+	 * 7 (MSI)
+	 */
+	for (temp = 0; temp < max_host_irqs; temp++) {
+		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
+		if (host_irqs[temp] < 0)
+			break;
+	}
+	if (temp) {
+		*num_irqs = temp;
+		ret = 0;
+	}
+out:
+	return ret;
+}
+
+static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+{
+	int i;
+
+	/* Legacy IRQ */
+	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
+		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
+		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
+					ks_pcie_legacy_irq_handler);
+	}
+	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+
+	/* MSI IRQ */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
+			irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
+						ks_pcie_msi_irq_handler);
+			irq_set_handler_data(ks_pcie->msi_host_irqs[i],
+					     ks_pcie);
+		}
+	}
+}
+
+/*
+ * When a PCI device does not exist during config cycles, keystone host gets a
+ * bus error instead of returning 0xffffffff. This handler always returns 0
+ * for this kind of faults.
+ */
+static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
+{
+	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+	if ((instr & 0x0e100090) == 0x00100090) {
+		int reg = (instr >> 12) & 15;
+
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+	}
+
+	return 0;
+}
+
+static void __init ks_pcie_host_init(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u32 val;
+
+	ks_pcie_establish_link(ks_pcie);
+	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+	ks_pcie_setup_interrupts(ks_pcie);
+	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+	       pp->dbi_base + PCI_IO_BASE);
+
+	/* update the Vendor ID */
+	writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);
+
+	/* update the DEV_STAT_CTRL to publish right mrrs */
+	val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+	val &= ~PCI_EXP_DEVCTL_READRQ;
+	/* set the mrrs to 256 bytes */
+	val |= BIT(12);
+	writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+
+	/*
+	 * PCIe access errors that result into OCP errors are caught by ARM as
+	 * "External aborts"
+	 */
+	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+			"Asynchronous external abort");
+}
+
+static struct pcie_host_ops keystone_pcie_host_ops = {
+	.rd_other_conf = ks_dw_pcie_rd_other_conf,
+	.wr_other_conf = ks_dw_pcie_wr_other_conf,
+	.link_up = ks_dw_pcie_link_up,
+	.host_init = ks_pcie_host_init,
+	.msi_set_irq = ks_dw_pcie_msi_set_irq,
+	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
+	.get_msi_addr = ks_dw_pcie_get_msi_addr,
+	.msi_host_init = ks_dw_pcie_msi_host_init,
+	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
+};
+
+static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
+				   struct platform_device *pdev)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	int ret;
+
+	ret = ks_pcie_get_irq_controller_info(ks_pcie,
+					"legacy-interrupt-controller",
+					&ks_pcie->num_legacy_host_irqs);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		ret = ks_pcie_get_irq_controller_info(ks_pcie,
+						"msi-interrupt-controller",
+						&ks_pcie->num_msi_host_irqs);
+		if (ret)
+			return ret;
+	}
+
+	pp->root_bus_nr = -1;
+	pp->ops = &keystone_pcie_host_ops;
+	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id ks_pcie_of_match[] = {
+	{
+		.type = "pci",
+		.compatible = "ti,keystone-pcie",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
+
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(ks_pcie->clk);
+
+	return 0;
+}
+
+static int __init ks_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct keystone_pcie *ks_pcie;
+	struct pcie_port *pp;
+	struct resource *res;
+	void __iomem *reg_p;
+	struct phy *phy;
+	int ret = 0;
+
+	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
+			       GFP_KERNEL);
+	if (!ks_pcie) {
+		dev_err(dev, "no memory for keystone pcie\n");
+		return -ENOMEM;
+	}
+	pp = &ks_pcie->pp;
+
+	/* initialize SerDes Phy if present */
+	phy = devm_phy_get(dev, "pcie-phy");
+	if (!IS_ERR_OR_NULL(phy)) {
+		ret = phy_init(phy);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* index 2 is to read PCI DEVICE_ID */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	reg_p = devm_ioremap_resource(dev, res);
+	if (IS_ERR(reg_p))
+		return PTR_ERR(reg_p);
+	ks_pcie->device_id = readl(reg_p) >> 16;
+	devm_iounmap(dev, reg_p);
+	devm_release_mem_region(dev, res->start, resource_size(res));
+
+	pp->dev = dev;
+	platform_set_drvdata(pdev, ks_pcie);
+	ks_pcie->clk = devm_clk_get(dev, "pcie");
+	if (IS_ERR(ks_pcie->clk)) {
+		dev_err(dev, "Failed to get pcie rc clock\n");
+		return PTR_ERR(ks_pcie->clk);
+	}
+	ret = clk_prepare_enable(ks_pcie->clk);
+	if (ret)
+		return ret;
+
+	ret = ks_add_pcie_port(ks_pcie, pdev);
+	if (ret < 0)
+		goto fail_clk;
+
+	return 0;
+fail_clk:
+	clk_disable_unprepare(ks_pcie->clk);
+
+	return ret;
+}
+
+static struct platform_driver ks_pcie_driver __refdata = {
+	.probe  = ks_pcie_probe,
+	.remove = __exit_p(ks_pcie_remove),
+	.driver = {
+		.name	= "keystone-pcie",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(ks_pcie_of_match),
+	},
+};
+
+module_platform_driver(ks_pcie_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_DESCRIPTION("Keystone PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
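
Both host-side handlers above follow the chained-IRQ convention of this kernel era: irq_set_chained_handler() replaces the normal flow handler, so the driver must bracket its demux with chained_irq_enter()/chained_irq_exit() to keep the parent controller's mask/ack/eoi sequencing intact. The bare pattern, with a hypothetical demux body:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

/* Hypothetical demux: one upstream line fans out to several PCI IRQs. */
static void my_pcie_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);	/* ack/mask at the parent chip */
	/* ... read pending bits, generic_handle_irq() each mapped virq ... */
	chained_irq_exit(chip, desc);	/* eoi/unmask at the parent chip */
}

static void my_pcie_wire_up(unsigned int host_irq, void *port)
{
	irq_set_handler_data(host_irq, port);
	irq_set_chained_handler(host_irq, my_pcie_chained_handler);
}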
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
new file mode 100644
index 000000000000..1fc1fceede9e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.h
@@ -0,0 +1,58 @@
+/*
+ * Keystone PCI Controller's common includes
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define MAX_LEGACY_IRQS		4
+#define MAX_MSI_HOST_IRQS	8
+#define MAX_LEGACY_HOST_IRQS	4
+
+struct keystone_pcie {
+	struct clk		*clk;
+	struct pcie_port	pp;
+	/* PCI Device ID */
+	u32			device_id;
+	int			num_legacy_host_irqs;
+	int			legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+	struct device_node	*legacy_intc_np;
+
+	int			num_msi_host_irqs;
+	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
+	struct device_node	*msi_intc_np;
+	struct irq_domain	*legacy_irq_domain;
+
+	/* Application register space */
+	void __iomem		*va_app_base;
+	struct resource		app;
+};
+
+/* Keystone DW specific MSI controller APIs/definitions */
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+
+/* Keystone specific PCI controller APIs */
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+		struct device_node *msi_intc_np);
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+		unsigned int devfn, int where, int size, u32 val);
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+		unsigned int devfn, int where, int size, u32 *val);
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
+int ks_dw_pcie_link_up(struct pcie_port *pp);
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
+		struct msi_chip *chip);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index a8c6f1a92e0f..b1315e197ffb 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -873,7 +873,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
 	rangesz = pna + na + ns;
 	nranges = rlen / sizeof(__be32) / rangesz;
 
-	for (i = 0; i < nranges; i++) {
+	for (i = 0; i < nranges; i++, range += rangesz) {
 		u32 flags = of_read_number(range, 1);
 		u32 slot = of_read_number(range + 1, 1);
 		u64 cpuaddr = of_read_number(range + na, pna);
@@ -883,14 +883,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
 			rtype = IORESOURCE_IO;
 		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
 			rtype = IORESOURCE_MEM;
+		else
+			continue;
 
 		if (slot == PCI_SLOT(devfn) && type == rtype) {
 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
 			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
 			return 0;
 		}
-
-		range += rangesz;
 	}
 
 	return -ENOENT;
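
The mvebu change is subtle: once the new else branch can continue past unrecognized range types, an increment left at the bottom of the loop body would be skipped and the same range re-parsed forever, so the range += rangesz bump moves into the for header where continue cannot bypass it. The hazard in miniature (standalone C, hypothetical data):

#include <stdio.h>

int main(void)
{
	int data[] = { 1, 2, 3, 4 };
	int i, *p = data;

	/* Had the bump stayed at the end of the body, a `continue` would
	 * jump over it and `p` would stick on the skipped element. Putting
	 * it in the for header, as mvebu_get_tgt_attr() now does, keeps
	 * the cursor moving on every iteration. */
	for (i = 0; i < 4; i++, p++) {
		if (*p == 2)
			continue;	/* increment still happens */
		printf("%d\n", *p);
	}
	return 0;
}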
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 946935db62b6..3d43874319be 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -38,6 +38,7 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/sizes.h>
@@ -115,13 +116,20 @@
 
 #define AFI_INTR_CODE			0xb8
 #define AFI_INTR_CODE_MASK		0xf
-#define AFI_INTR_AXI_SLAVE_ERROR	1
-#define AFI_INTR_AXI_DECODE_ERROR	2
+#define AFI_INTR_INI_SLAVE_ERROR	1
+#define AFI_INTR_INI_DECODE_ERROR	2
 #define AFI_INTR_TARGET_ABORT		3
 #define AFI_INTR_MASTER_ABORT		4
 #define AFI_INTR_INVALID_WRITE		5
 #define AFI_INTR_LEGACY			6
 #define AFI_INTR_FPCI_DECODE_ERROR	7
+#define AFI_INTR_AXI_DECODE_ERROR	8
+#define AFI_INTR_FPCI_TIMEOUT		9
+#define AFI_INTR_PE_PRSNT_SENSE		10
+#define AFI_INTR_PE_CLKREQ_SENSE	11
+#define AFI_INTR_CLKCLAMP_SENSE		12
+#define AFI_INTR_RDY4PD_SENSE		13
+#define AFI_INTR_P2P_ERROR		14
 
 #define AFI_INTR_SIGNATURE		0xbc
 #define AFI_UPPER_FPCI_ADDRESS		0xc0
@@ -152,8 +160,10 @@
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420		(0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222		(0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411		(0x2 << 20)
 
 #define AFI_FUSE			0x104
@@ -165,12 +175,21 @@
 #define AFI_PEX_CTRL_RST		(1 << 0)
 #define AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
 #define AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
+#define AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
+
+#define AFI_PLLE_CONTROL		0x160
+#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 
 #define AFI_PEXBIAS_CTRL_0		0x168
 
 #define RP_VEND_XP			0x00000F00
 #define RP_VEND_XP_DL_UP		(1 << 30)
 
+#define RP_PRIV_MISC			0x00000FE0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT	(0xE << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT	(0xF << 0)
+
 #define RP_LINK_CONTROL_STATUS			0x00000090
 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
@@ -197,6 +216,7 @@
 
 #define PADS_REFCLK_CFG0			0x000000C8
 #define PADS_REFCLK_CFG1			0x000000CC
+#define PADS_REFCLK_BIAS			0x000000D0
 
 /*
  * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -236,6 +256,7 @@ struct tegra_pcie_soc_data {
 	bool has_pex_bias_ctrl;
 	bool has_intr_prsnt_sense;
 	bool has_cml_clk;
+	bool has_gen2;
 };
 
 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
@@ -253,6 +274,7 @@ struct tegra_pcie {
 	struct list_head buses;
 	struct resource *cs;
 
+	struct resource all;
 	struct resource io;
 	struct resource mem;
 	struct resource prefetch;
@@ -267,6 +289,8 @@ struct tegra_pcie {
 	struct reset_control *afi_rst;
 	struct reset_control *pcie_xrst;
 
+	struct phy *phy;
+
 	struct tegra_msi msi;
 
 	struct list_head ports;
@@ -382,7 +406,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 	for (i = 0; i < 16; i++) {
 		unsigned long virt = (unsigned long)bus->area->addr +
 				     i * SZ_64K;
-		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
+		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
 
 		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
 		if (err < 0) {
@@ -561,6 +585,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 	if (soc->has_pex_clkreq_en)
 		value |= AFI_PEX_CTRL_CLKREQ_EN;
 
+	value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
 	afi_writel(port->pcie, value, ctrl);
 
 	tegra_pcie_port_reset(port);
@@ -568,6 +594,7 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 
 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 {
+	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 	unsigned long value;
 
@@ -578,6 +605,10 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 
 	/* disable reference clock */
 	value = afi_readl(port->pcie, ctrl);
+
+	if (soc->has_pex_clkreq_en)
+		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
 	value &= ~AFI_PEX_CTRL_REFCLK_EN;
 	afi_writel(port->pcie, value, ctrl);
 }
@@ -626,7 +657,18 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
-	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
+	int err;
+	phys_addr_t io_start;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+	if (err < 0)
+		return err;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
+	if (err)
+		return err;
+
+	io_start = pci_pio_to_address(pcie->io.start);
 
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
@@ -685,9 +727,15 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 		"Target abort",
 		"Master abort",
 		"Invalid write",
+		"Legacy interrupt",
 		"Response decoding error",
 		"AXI response decoding error",
 		"Transaction timeout",
+		"Slot present pin change",
+		"Slot clock request change",
+		"TMS clock ramp change",
+		"TMS ready for power down",
+		"Peer2Peer error",
 	};
 	struct tegra_pcie *pcie = arg;
 	u32 code, signature;
@@ -794,30 +842,27 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 }
 
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 {
 	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
-	struct tegra_pcie_port *port;
-	unsigned int timeout;
-	unsigned long value;
+	u32 value;
 
-	/* power down PCIe slot clock bias pad */
-	if (soc->has_pex_bias_ctrl)
-		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+	timeout = jiffies + msecs_to_jiffies(timeout);
 
-	/* configure mode and disable all ports */
-	value = afi_readl(pcie, AFI_PCIE_CONFIG);
-	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
-	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
-
-	list_for_each_entry(port, &pcie->ports, list)
-		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
-
-	afi_writel(pcie, value, AFI_PCIE_CONFIG);
+	while (time_before(jiffies, timeout)) {
+		value = pads_readl(pcie, soc->pads_pll_ctl);
+		if (value & PADS_PLL_CTL_LOCKDET)
+			return 0;
+	}
 
-	value = afi_readl(pcie, AFI_FUSE);
-	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
-	afi_writel(pcie, value, AFI_FUSE);
+	return -ETIMEDOUT;
+}
+
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	u32 value;
+	int err;
 
 	/* initialize internal PHY, enable up to 16 PCIE lanes */
 	pads_writel(pcie, 0x0, PADS_CTL_SEL);
@@ -836,6 +881,13 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
 	pads_writel(pcie, value, soc->pads_pll_ctl);
 
+	/* reset PLL */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	usleep_range(20, 100);
+
 	/* take PLL out of reset */
 	value = pads_readl(pcie, soc->pads_pll_ctl);
 	value |= PADS_PLL_CTL_RST_B4SM;
@@ -848,15 +900,11 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 	pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
 
 	/* wait for the PLL to lock */
-	timeout = 300;
-	do {
-		value = pads_readl(pcie, soc->pads_pll_ctl);
-		usleep_range(1000, 2000);
-		if (--timeout == 0) {
-			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
-			return -EBUSY;
-		}
-	} while (!(value & PADS_PLL_CTL_LOCKDET));
+	err = tegra_pcie_pll_wait(pcie, 500);
+	if (err < 0) {
+		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+		return err;
+	}
 
 	/* turn off IDDQ override */
 	value = pads_readl(pcie, PADS_CTL);
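
tegra_pcie_pll_wait() swaps a counted do/while poll for the kernel's standard jiffies deadline idiom: compute the deadline once with msecs_to_jiffies(), then loop on time_before() so the bound is wall-clock time instead of an iteration count. The shape in isolation (a sketch; the register and bit are placeholders):

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical poll: wait up to `ms` milliseconds for a bit to set. */
static int poll_bit_set(void __iomem *reg, u32 bit, unsigned long ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	while (time_before(jiffies, deadline)) {
		if (readl(reg) & bit)
			return 0;
		usleep_range(100, 200);	/* back off between reads */
	}

	return -ETIMEDOUT;	/* deadline passed, bit never set */
}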
@@ -868,6 +916,58 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
 	pads_writel(pcie, value, PADS_CTL);
 
+	return 0;
+}
+
+static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	struct tegra_pcie_port *port;
+	unsigned long value;
+	int err;
+
+	/* enable PLL power down */
+	if (pcie->phy) {
+		value = afi_readl(pcie, AFI_PLLE_CONTROL);
+		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+		afi_writel(pcie, value, AFI_PLLE_CONTROL);
+	}
+
+	/* power down PCIe slot clock bias pad */
+	if (soc->has_pex_bias_ctrl)
+		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+	/* configure mode and disable all ports */
+	value = afi_readl(pcie, AFI_PCIE_CONFIG);
+	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+
+	list_for_each_entry(port, &pcie->ports, list)
+		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+
+	afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+	if (soc->has_gen2) {
+		value = afi_readl(pcie, AFI_FUSE);
+		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+		afi_writel(pcie, value, AFI_FUSE);
+	} else {
+		value = afi_readl(pcie, AFI_FUSE);
+		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+		afi_writel(pcie, value, AFI_FUSE);
+	}
+
+	if (!pcie->phy)
+		err = tegra_pcie_phy_enable(pcie);
+	else
+		err = phy_power_on(pcie->phy);
+
+	if (err < 0) {
+		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+		return err;
+	}
+
 	/* take the PCIe interface module out of reset */
 	reset_control_deassert(pcie->pcie_xrst);
 
@@ -901,6 +1001,10 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
 
 	/* TODO: disable and unprepare clocks? */
 
+	err = phy_power_off(pcie->phy);
+	if (err < 0)
+		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+
 	reset_control_assert(pcie->pcie_xrst);
 	reset_control_assert(pcie->afi_rst);
 	reset_control_assert(pcie->pex_rst);
@@ -1022,6 +1126,19 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
 		return err;
 	}
 
+	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+	if (IS_ERR(pcie->phy)) {
+		err = PTR_ERR(pcie->phy);
+		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
+		return err;
+	}
+
+	err = phy_init(pcie->phy);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+		return err;
+	}
+
 	err = tegra_pcie_power_on(pcie);
 	if (err) {
 		dev_err(&pdev->dev, "failed to power up: %d\n", err);
@@ -1080,10 +1197,17 @@ poweroff:
 
 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
 {
+	int err;
+
 	if (pcie->irq > 0)
 		free_irq(pcie->irq, pcie);
 
 	tegra_pcie_power_off(pcie);
+
+	err = phy_exit(pcie->phy);
+	if (err < 0)
+		dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+
 	return 0;
 }
 
@@ -1172,8 +1296,10 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 		return hwirq;
 
 	irq = irq_create_mapping(msi->domain, hwirq);
-	if (!irq)
+	if (!irq) {
+		tegra_msi_free(msi, hwirq);
 		return -EINVAL;
+	}
 
 	irq_set_msi_desc(irq, desc);
 
@@ -1191,8 +1317,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1191{ 1317{
1192 struct tegra_msi *msi = to_tegra_msi(chip); 1318 struct tegra_msi *msi = to_tegra_msi(chip);
1193 struct irq_data *d = irq_get_irq_data(irq); 1319 struct irq_data *d = irq_get_irq_data(irq);
1320 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1194 1321
1195 tegra_msi_free(msi, d->hwirq); 1322 irq_dispose_mapping(irq);
1323 tegra_msi_free(msi, hwirq);
1196} 1324}
1197 1325
1198static struct irq_chip tegra_msi_irq_chip = { 1326static struct irq_chip tegra_msi_irq_chip = {
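The teardown hunk above is deliberately ordered: the hardware IRQ number has to be read out of the irq_data before irq_dispose_mapping() destroys the mapping, because the irq_data is stale afterwards. The same pattern in isolation (msi_bitmap is an illustrative name, not from this patch):

	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));

	irq_dispose_mapping(irq);	/* the irq_data dies with the mapping */
	clear_bit(hwirq, msi_bitmap);	/* safe: uses the saved hw number */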
@@ -1329,7 +1457,19 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1329{ 1457{
1330 struct device_node *np = pcie->dev->of_node; 1458 struct device_node *np = pcie->dev->of_node;
1331 1459
1332 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { 1460 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1461 switch (lanes) {
1462 case 0x00000104:
1463 dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1464 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1465 return 0;
1466
1467 case 0x00000102:
1468 dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1469 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1470 return 0;
1471 }
1472 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1333 switch (lanes) { 1473 switch (lanes) {
1334 case 0x00000204: 1474 case 0x00000204:
1335 dev_info(pcie->dev, "4x1, 2x1 configuration\n"); 1475 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
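For reference, the switch keys above pack one byte of lane count per root port, which is why the new Tegra124 entries read 0x00000104 (port 0: x4, port 1: x1) and 0x00000102 (port 0: x2, port 1: x1). A hypothetical helper showing how such a key is built from per-port lane counts:

	/* Illustrative only: build the xbar lookup key matched above. */
	static u32 pack_lane_key(const u8 *lanes, unsigned int nports)
	{
		u32 key = 0;
		unsigned int i;

		for (i = 0; i < nports; i++)
			key |= (u32)lanes[i] << (i << 3);	/* 8 bits per port */

		return key;	/* {4, 1} -> 0x00000104 */
	}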
@@ -1437,7 +1577,23 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1437 struct device_node *np = pcie->dev->of_node; 1577 struct device_node *np = pcie->dev->of_node;
1438 unsigned int i = 0; 1578 unsigned int i = 0;
1439 1579
1440 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { 1580 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1581 pcie->num_supplies = 7;
1582
1583 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1584 sizeof(*pcie->supplies),
1585 GFP_KERNEL);
1586 if (!pcie->supplies)
1587 return -ENOMEM;
1588
1589 pcie->supplies[i++].supply = "avddio-pex";
1590 pcie->supplies[i++].supply = "dvddio-pex";
1591 pcie->supplies[i++].supply = "avdd-pex-pll";
1592 pcie->supplies[i++].supply = "hvdd-pex";
1593 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1594 pcie->supplies[i++].supply = "vddio-pex-ctl";
1595 pcie->supplies[i++].supply = "avdd-pll-erefe";
1596 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1441 bool need_pexa = false, need_pexb = false; 1597 bool need_pexa = false, need_pexb = false;
1442 1598
1443 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ 1599 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
@@ -1516,6 +1672,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1516 struct resource res; 1672 struct resource res;
1517 int err; 1673 int err;
1518 1674
1675 memset(&pcie->all, 0, sizeof(pcie->all));
1676 pcie->all.flags = IORESOURCE_MEM;
1677 pcie->all.name = np->full_name;
1678 pcie->all.start = ~0;
1679 pcie->all.end = 0;
1680
1519 if (of_pci_range_parser_init(&parser, np)) { 1681 if (of_pci_range_parser_init(&parser, np)) {
1520 dev_err(pcie->dev, "missing \"ranges\" property\n"); 1682 dev_err(pcie->dev, "missing \"ranges\" property\n");
1521 return -EINVAL; 1683 return -EINVAL;
@@ -1529,21 +1691,31 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1529 switch (res.flags & IORESOURCE_TYPE_BITS) { 1691 switch (res.flags & IORESOURCE_TYPE_BITS) {
1530 case IORESOURCE_IO: 1692 case IORESOURCE_IO:
1531 memcpy(&pcie->io, &res, sizeof(res)); 1693 memcpy(&pcie->io, &res, sizeof(res));
1532 pcie->io.name = "I/O"; 1694 pcie->io.name = np->full_name;
1533 break; 1695 break;
1534 1696
1535 case IORESOURCE_MEM: 1697 case IORESOURCE_MEM:
1536 if (res.flags & IORESOURCE_PREFETCH) { 1698 if (res.flags & IORESOURCE_PREFETCH) {
1537 memcpy(&pcie->prefetch, &res, sizeof(res)); 1699 memcpy(&pcie->prefetch, &res, sizeof(res));
1538 pcie->prefetch.name = "PREFETCH"; 1700 pcie->prefetch.name = "prefetchable";
1539 } else { 1701 } else {
1540 memcpy(&pcie->mem, &res, sizeof(res)); 1702 memcpy(&pcie->mem, &res, sizeof(res));
1541 pcie->mem.name = "MEM"; 1703 pcie->mem.name = "non-prefetchable";
1542 } 1704 }
1543 break; 1705 break;
1544 } 1706 }
1707
1708 if (res.start <= pcie->all.start)
1709 pcie->all.start = res.start;
1710
1711 if (res.end >= pcie->all.end)
1712 pcie->all.end = res.end;
1545 } 1713 }
1546 1714
1715 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1716 if (err < 0)
1717 return err;
1718
1547 err = of_pci_parse_bus_range(np, &pcie->busn); 1719 err = of_pci_parse_bus_range(np, &pcie->busn);
1548 if (err < 0) { 1720 if (err < 0) {
1549 dev_err(pcie->dev, "failed to parse bus-range property: %d\n", 1721 dev_err(pcie->dev, "failed to parse bus-range property: %d\n",
@@ -1645,6 +1817,12 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1645 unsigned int retries = 3; 1817 unsigned int retries = 3;
1646 unsigned long value; 1818 unsigned long value;
1647 1819
1820 /* override presence detection */
1821 value = readl(port->base + RP_PRIV_MISC);
1822 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1823 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1824 writel(value, port->base + RP_PRIV_MISC);
1825
1648 do { 1826 do {
1649 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT; 1827 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1650 1828
@@ -1725,6 +1903,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1725 .has_pex_bias_ctrl = false, 1903 .has_pex_bias_ctrl = false,
1726 .has_intr_prsnt_sense = false, 1904 .has_intr_prsnt_sense = false,
1727 .has_cml_clk = false, 1905 .has_cml_clk = false,
1906 .has_gen2 = false,
1728}; 1907};
1729 1908
1730static const struct tegra_pcie_soc_data tegra30_pcie_data = { 1909static const struct tegra_pcie_soc_data tegra30_pcie_data = {
@@ -1736,9 +1915,23 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1736 .has_pex_bias_ctrl = true, 1915 .has_pex_bias_ctrl = true,
1737 .has_intr_prsnt_sense = true, 1916 .has_intr_prsnt_sense = true,
1738 .has_cml_clk = true, 1917 .has_cml_clk = true,
1918 .has_gen2 = false,
1919};
1920
1921static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1922 .num_ports = 2,
1923 .msi_base_shift = 8,
1924 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1925 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1926 .has_pex_clkreq_en = true,
1927 .has_pex_bias_ctrl = true,
1928 .has_intr_prsnt_sense = true,
1929 .has_cml_clk = true,
1930 .has_gen2 = true,
1739}; 1931};
1740 1932
1741static const struct of_device_id tegra_pcie_of_match[] = { 1933static const struct of_device_id tegra_pcie_of_match[] = {
1934 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
1742 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data }, 1935 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1743 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data }, 1936 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1744 { }, 1937 { },
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52bd3a143563..34e736601259 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@ static unsigned long global_io_offset;
73 73
74static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) 74static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
75{ 75{
76 BUG_ON(!sys->private_data);
77
76 return sys->private_data; 78 return sys->private_data;
77} 79}
78 80
@@ -261,11 +263,6 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
261 int irq, pos0, pos1, i; 263 int irq, pos0, pos1, i;
262 struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); 264 struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
263 265
264 if (!pp) {
265 BUG();
266 return -EINVAL;
267 }
268
269 pos0 = find_first_zero_bit(pp->msi_irq_in_use, 266 pos0 = find_first_zero_bit(pp->msi_irq_in_use,
270 MAX_MSI_IRQS); 267 MAX_MSI_IRQS);
271 if (pos0 % no_irqs) { 268 if (pos0 % no_irqs) {
@@ -326,10 +323,6 @@ static void clear_irq(unsigned int irq)
326 /* get the port structure */ 323 /* get the port structure */
327 msi = irq_data_get_msi(data); 324 msi = irq_data_get_msi(data);
328 pp = sys_to_pcie(msi->dev->bus->sysdata); 325 pp = sys_to_pcie(msi->dev->bus->sysdata);
329 if (!pp) {
330 BUG();
331 return;
332 }
333 326
334 /* undo what was done in assign_irq */ 327 /* undo what was done in assign_irq */
335 pos = data->hwirq; 328 pos = data->hwirq;
@@ -350,11 +343,6 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
350 struct msi_msg msg; 343 struct msi_msg msg;
351 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); 344 struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
352 345
353 if (!pp) {
354 BUG();
355 return -EINVAL;
356 }
357
358 pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS, 346 pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
359 &msg_ctr); 347 &msg_ctr);
360 msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4; 348 msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
@@ -373,12 +361,17 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
373 */ 361 */
374 desc->msi_attrib.multiple = msgvec; 362 desc->msi_attrib.multiple = msgvec;
375 363
376 if (pp->ops->get_msi_data) 364 if (pp->ops->get_msi_addr)
377 msg.address_lo = pp->ops->get_msi_data(pp); 365 msg.address_lo = pp->ops->get_msi_addr(pp);
378 else 366 else
379 msg.address_lo = virt_to_phys((void *)pp->msi_data); 367 msg.address_lo = virt_to_phys((void *)pp->msi_data);
380 msg.address_hi = 0x0; 368 msg.address_hi = 0x0;
381 msg.data = pos; 369
370 if (pp->ops->get_msi_data)
371 msg.data = pp->ops->get_msi_data(pp, pos);
372 else
373 msg.data = pos;
374
382 write_msi_msg(irq, &msg); 375 write_msi_msg(irq, &msg);
383 376
384 return 0; 377 return 0;
@@ -425,7 +418,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
425 struct resource *cfg_res; 418 struct resource *cfg_res;
426 u32 val, na, ns; 419 u32 val, na, ns;
427 const __be32 *addrp; 420 const __be32 *addrp;
428 int i, index; 421 int i, index, ret;
429 422
430 /* Find the address cell size and the number of cells in order to get 423 /* Find the address cell size and the number of cells in order to get
431 * the untranslated address. 424 * the untranslated address.
@@ -435,16 +428,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
435 428
436 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); 429 cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
437 if (cfg_res) { 430 if (cfg_res) {
438 pp->config.cfg0_size = resource_size(cfg_res)/2; 431 pp->cfg0_size = resource_size(cfg_res)/2;
439 pp->config.cfg1_size = resource_size(cfg_res)/2; 432 pp->cfg1_size = resource_size(cfg_res)/2;
440 pp->cfg0_base = cfg_res->start; 433 pp->cfg0_base = cfg_res->start;
441 pp->cfg1_base = cfg_res->start + pp->config.cfg0_size; 434 pp->cfg1_base = cfg_res->start + pp->cfg0_size;
442 435
443 /* Find the untranslated configuration space address */ 436 /* Find the untranslated configuration space address */
444 index = of_property_match_string(np, "reg-names", "config"); 437 index = of_property_match_string(np, "reg-names", "config");
445 addrp = of_get_address(np, index, false, false); 438 addrp = of_get_address(np, index, NULL, NULL);
446 pp->cfg0_mod_base = of_read_number(addrp, ns); 439 pp->cfg0_mod_base = of_read_number(addrp, ns);
447 pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size; 440 pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
448 } else { 441 } else {
449 dev_err(pp->dev, "missing *config* reg space\n"); 442 dev_err(pp->dev, "missing *config* reg space\n");
450 } 443 }
@@ -466,9 +459,9 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
466 pp->io.end = min_t(resource_size_t, 459 pp->io.end = min_t(resource_size_t,
467 IO_SPACE_LIMIT, 460 IO_SPACE_LIMIT,
468 range.pci_addr + range.size 461 range.pci_addr + range.size
469 + global_io_offset); 462 + global_io_offset - 1);
470 pp->config.io_size = resource_size(&pp->io); 463 pp->io_size = resource_size(&pp->io);
471 pp->config.io_bus_addr = range.pci_addr; 464 pp->io_bus_addr = range.pci_addr;
472 pp->io_base = range.cpu_addr; 465 pp->io_base = range.cpu_addr;
473 466
474 /* Find the untranslated IO space address */ 467 /* Find the untranslated IO space address */
@@ -478,8 +471,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
478 if (restype == IORESOURCE_MEM) { 471 if (restype == IORESOURCE_MEM) {
479 of_pci_range_to_resource(&range, np, &pp->mem); 472 of_pci_range_to_resource(&range, np, &pp->mem);
480 pp->mem.name = "MEM"; 473 pp->mem.name = "MEM";
481 pp->config.mem_size = resource_size(&pp->mem); 474 pp->mem_size = resource_size(&pp->mem);
482 pp->config.mem_bus_addr = range.pci_addr; 475 pp->mem_bus_addr = range.pci_addr;
483 476
484 /* Find the untranslated MEM space address */ 477 /* Find the untranslated MEM space address */
485 pp->mem_mod_base = of_read_number(parser.range - 478 pp->mem_mod_base = of_read_number(parser.range -
@@ -487,19 +480,29 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
487 } 480 }
488 if (restype == 0) { 481 if (restype == 0) {
489 of_pci_range_to_resource(&range, np, &pp->cfg); 482 of_pci_range_to_resource(&range, np, &pp->cfg);
490 pp->config.cfg0_size = resource_size(&pp->cfg)/2; 483 pp->cfg0_size = resource_size(&pp->cfg)/2;
491 pp->config.cfg1_size = resource_size(&pp->cfg)/2; 484 pp->cfg1_size = resource_size(&pp->cfg)/2;
492 pp->cfg0_base = pp->cfg.start; 485 pp->cfg0_base = pp->cfg.start;
493 pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; 486 pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
494 487
495 /* Find the untranslated configuration space address */ 488 /* Find the untranslated configuration space address */
496 pp->cfg0_mod_base = of_read_number(parser.range - 489 pp->cfg0_mod_base = of_read_number(parser.range -
497 parser.np + na, ns); 490 parser.np + na, ns);
498 pp->cfg1_mod_base = pp->cfg0_mod_base + 491 pp->cfg1_mod_base = pp->cfg0_mod_base +
499 pp->config.cfg0_size; 492 pp->cfg0_size;
500 } 493 }
501 } 494 }
502 495
496 ret = of_pci_parse_bus_range(np, &pp->busn);
497 if (ret < 0) {
498 pp->busn.name = np->name;
499 pp->busn.start = 0;
500 pp->busn.end = 0xff;
501 pp->busn.flags = IORESOURCE_BUS;
502 dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
503 ret, &pp->busn);
504 }
505
503 if (!pp->dbi_base) { 506 if (!pp->dbi_base) {
504 pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, 507 pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
505 resource_size(&pp->cfg)); 508 resource_size(&pp->cfg));
@@ -511,17 +514,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
511 514
512 pp->mem_base = pp->mem.start; 515 pp->mem_base = pp->mem.start;
513 516
514 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
515 pp->config.cfg0_size);
516 if (!pp->va_cfg0_base) { 517 if (!pp->va_cfg0_base) {
517 dev_err(pp->dev, "error with ioremap in function\n"); 518 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
518 return -ENOMEM; 519 pp->cfg0_size);
520 if (!pp->va_cfg0_base) {
521 dev_err(pp->dev, "failed to ioremap cfg0 space\n");
522 return -ENOMEM;
523 }
519 } 524 }
520 pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base, 525
521 pp->config.cfg1_size);
522 if (!pp->va_cfg1_base) { 526 if (!pp->va_cfg1_base) {
523 dev_err(pp->dev, "error with ioremap\n"); 527 pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
524 return -ENOMEM; 528 pp->cfg1_size);
529 if (!pp->va_cfg1_base) {
530 dev_err(pp->dev, "failed to ioremap cfg1 space\n");
531 return -ENOMEM;
532 }
525 } 533 }
526 534
527 if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { 535 if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
@@ -530,16 +538,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
530 } 538 }
531 539
532 if (IS_ENABLED(CONFIG_PCI_MSI)) { 540 if (IS_ENABLED(CONFIG_PCI_MSI)) {
533 pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, 541 if (!pp->ops->msi_host_init) {
534 MAX_MSI_IRQS, &msi_domain_ops, 542 pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
535 &dw_pcie_msi_chip); 543 MAX_MSI_IRQS, &msi_domain_ops,
536 if (!pp->irq_domain) { 544 &dw_pcie_msi_chip);
537 dev_err(pp->dev, "irq domain init failed\n"); 545 if (!pp->irq_domain) {
538 return -ENXIO; 546 dev_err(pp->dev, "irq domain init failed\n");
539 } 547 return -ENXIO;
548 }
540 549
541 for (i = 0; i < MAX_MSI_IRQS; i++) 550 for (i = 0; i < MAX_MSI_IRQS; i++)
542 irq_create_mapping(pp->irq_domain, i); 551 irq_create_mapping(pp->irq_domain, i);
552 } else {
553 ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
554 if (ret < 0)
555 return ret;
556 }
543 } 557 }
544 558
545 if (pp->ops->host_init) 559 if (pp->ops->host_init)
@@ -558,7 +572,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
558 dw_pci.private_data = (void **)&pp; 572 dw_pci.private_data = (void **)&pp;
559 573
560 pci_common_init_dev(pp->dev, &dw_pci); 574 pci_common_init_dev(pp->dev, &dw_pci);
561 pci_assign_unassigned_resources();
562#ifdef CONFIG_PCI_DOMAINS 575#ifdef CONFIG_PCI_DOMAINS
563 dw_pci.domain++; 576 dw_pci.domain++;
564#endif 577#endif
@@ -573,7 +586,7 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
573 PCIE_ATU_VIEWPORT); 586 PCIE_ATU_VIEWPORT);
574 dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); 587 dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
575 dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); 588 dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
576 dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1, 589 dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
577 PCIE_ATU_LIMIT); 590 PCIE_ATU_LIMIT);
578 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 591 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
579 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 592 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -589,7 +602,7 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
589 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); 602 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
590 dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); 603 dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
591 dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); 604 dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
592 dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1, 605 dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
593 PCIE_ATU_LIMIT); 606 PCIE_ATU_LIMIT);
594 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 607 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
595 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 608 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -604,10 +617,10 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
604 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); 617 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
605 dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); 618 dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
606 dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); 619 dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
607 dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1, 620 dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
608 PCIE_ATU_LIMIT); 621 PCIE_ATU_LIMIT);
609 dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); 622 dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
610 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), 623 dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
611 PCIE_ATU_UPPER_TARGET); 624 PCIE_ATU_UPPER_TARGET);
612 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 625 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
613} 626}
@@ -620,10 +633,10 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
620 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); 633 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
621 dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); 634 dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
622 dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); 635 dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
623 dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1, 636 dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
624 PCIE_ATU_LIMIT); 637 PCIE_ATU_LIMIT);
625 dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); 638 dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
626 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), 639 dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
627 PCIE_ATU_UPPER_TARGET); 640 PCIE_ATU_UPPER_TARGET);
628 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 641 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
629} 642}
@@ -707,11 +720,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
707 struct pcie_port *pp = sys_to_pcie(bus->sysdata); 720 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
708 int ret; 721 int ret;
709 722
710 if (!pp) {
711 BUG();
712 return -EINVAL;
713 }
714
715 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { 723 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
716 *val = 0xffffffff; 724 *val = 0xffffffff;
717 return PCIBIOS_DEVICE_NOT_FOUND; 725 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -736,11 +744,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
736 struct pcie_port *pp = sys_to_pcie(bus->sysdata); 744 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
737 int ret; 745 int ret;
738 746
739 if (!pp) {
740 BUG();
741 return -EINVAL;
742 }
743
744 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) 747 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
745 return PCIBIOS_DEVICE_NOT_FOUND; 748 return PCIBIOS_DEVICE_NOT_FOUND;
746 749
@@ -768,19 +771,17 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
768 771
769 pp = sys_to_pcie(sys); 772 pp = sys_to_pcie(sys);
770 773
771 if (!pp) 774 if (global_io_offset < SZ_1M && pp->io_size > 0) {
772 return 0; 775 sys->io_offset = global_io_offset - pp->io_bus_addr;
773
774 if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
775 sys->io_offset = global_io_offset - pp->config.io_bus_addr;
776 pci_ioremap_io(global_io_offset, pp->io_base); 776 pci_ioremap_io(global_io_offset, pp->io_base);
777 global_io_offset += SZ_64K; 777 global_io_offset += SZ_64K;
778 pci_add_resource_offset(&sys->resources, &pp->io, 778 pci_add_resource_offset(&sys->resources, &pp->io,
779 sys->io_offset); 779 sys->io_offset);
780 } 780 }
781 781
782 sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr; 782 sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
783 pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); 783 pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
784 pci_add_resource(&sys->resources, &pp->busn);
784 785
785 return 1; 786 return 1;
786} 787}
@@ -790,14 +791,16 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
790 struct pci_bus *bus; 791 struct pci_bus *bus;
791 struct pcie_port *pp = sys_to_pcie(sys); 792 struct pcie_port *pp = sys_to_pcie(sys);
792 793
793 if (pp) { 794 pp->root_bus_nr = sys->busnr;
794 pp->root_bus_nr = sys->busnr; 795 bus = pci_create_root_bus(pp->dev, sys->busnr,
795 bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops, 796 &dw_pcie_ops, sys, &sys->resources);
796 sys, &sys->resources); 797 if (!bus)
797 } else { 798 return NULL;
798 bus = NULL; 799
799 BUG(); 800 pci_scan_child_bus(bus);
800 } 801
802 if (bus && pp->ops->scan_bus)
803 pp->ops->scan_bus(pp);
801 804
802 return bus; 805 return bus;
803} 806}
@@ -833,7 +836,6 @@ static struct hw_pci dw_pci = {
833 836
834void dw_pcie_setup_rc(struct pcie_port *pp) 837void dw_pcie_setup_rc(struct pcie_port *pp)
835{ 838{
836 struct pcie_port_info *config = &pp->config;
837 u32 val; 839 u32 val;
838 u32 membase; 840 u32 membase;
839 u32 memlimit; 841 u32 memlimit;
@@ -888,7 +890,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
888 890
889 /* setup memory base, memory limit */ 891 /* setup memory base, memory limit */
890 membase = ((u32)pp->mem_base & 0xfff00000) >> 16; 892 membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
891 memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; 893 memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
892 val = memlimit | membase; 894 val = memlimit | membase;
893 dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE); 895 dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
894 896
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index daf81f922cda..c6256751daff 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -14,15 +14,6 @@
14#ifndef _PCIE_DESIGNWARE_H 14#ifndef _PCIE_DESIGNWARE_H
15#define _PCIE_DESIGNWARE_H 15#define _PCIE_DESIGNWARE_H
16 16
17struct pcie_port_info {
18 u32 cfg0_size;
19 u32 cfg1_size;
20 u32 io_size;
21 u32 mem_size;
22 phys_addr_t io_bus_addr;
23 phys_addr_t mem_bus_addr;
24};
25
26/* 17/*
27 * Maximum number of MSI IRQs can be 256 per controller. But keep 18 * Maximum number of MSI IRQs can be 256 per controller. But keep
28 * it 32 as of now. Probably we will never need more than 32. If needed, 19 * it 32 as of now. Probably we will never need more than 32. If needed,
@@ -38,17 +29,23 @@ struct pcie_port {
38 u64 cfg0_base; 29 u64 cfg0_base;
39 u64 cfg0_mod_base; 30 u64 cfg0_mod_base;
40 void __iomem *va_cfg0_base; 31 void __iomem *va_cfg0_base;
32 u32 cfg0_size;
41 u64 cfg1_base; 33 u64 cfg1_base;
42 u64 cfg1_mod_base; 34 u64 cfg1_mod_base;
43 void __iomem *va_cfg1_base; 35 void __iomem *va_cfg1_base;
36 u32 cfg1_size;
44 u64 io_base; 37 u64 io_base;
45 u64 io_mod_base; 38 u64 io_mod_base;
39 phys_addr_t io_bus_addr;
40 u32 io_size;
46 u64 mem_base; 41 u64 mem_base;
47 u64 mem_mod_base; 42 u64 mem_mod_base;
43 phys_addr_t mem_bus_addr;
44 u32 mem_size;
48 struct resource cfg; 45 struct resource cfg;
49 struct resource io; 46 struct resource io;
50 struct resource mem; 47 struct resource mem;
51 struct pcie_port_info config; 48 struct resource busn;
52 int irq; 49 int irq;
53 u32 lanes; 50 u32 lanes;
54 struct pcie_host_ops *ops; 51 struct pcie_host_ops *ops;
@@ -73,7 +70,10 @@ struct pcie_host_ops {
73 void (*host_init)(struct pcie_port *pp); 70 void (*host_init)(struct pcie_port *pp);
74 void (*msi_set_irq)(struct pcie_port *pp, int irq); 71 void (*msi_set_irq)(struct pcie_port *pp, int irq);
75 void (*msi_clear_irq)(struct pcie_port *pp, int irq); 72 void (*msi_clear_irq)(struct pcie_port *pp, int irq);
76 u32 (*get_msi_data)(struct pcie_port *pp); 73 u32 (*get_msi_addr)(struct pcie_port *pp);
74 u32 (*get_msi_data)(struct pcie_port *pp, int pos);
75 void (*scan_bus)(struct pcie_port *pp);
76 int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
77}; 77};
78 78
79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); 79int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
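The reworked pcie_host_ops above splits MSI programming three ways: get_msi_addr() supplies the doorbell address written into the MSI message, get_msi_data() the per-vector payload, and msi_host_init() lets a platform driver replace the core's MSI domain setup entirely (the Keystone driver added in this series is the intended user). A hedged sketch of how a host driver might wire these up; the my_* names and the doorbell constant are placeholders, not part of this patch:

	#define MY_MSI_DOORBELL	0x54000000	/* hypothetical MSI target address */

	static u32 my_get_msi_addr(struct pcie_port *pp)
	{
		return MY_MSI_DOORBELL;		/* where endpoints post MSI writes */
	}

	static u32 my_get_msi_data(struct pcie_port *pp, int pos)
	{
		return pos;			/* payload identifying vector 'pos' */
	}

	static struct pcie_host_ops my_host_ops = {
		.get_msi_addr	= my_get_msi_addr,
		.get_msi_data	= my_get_msi_data,
		/* .msi_host_init = ..., to bypass the core MSI domain setup */
	};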
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 6dea9e43a75c..85f594e1708f 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -340,7 +340,7 @@ static int __init spear13xx_pcie_probe(struct platform_device *pdev)
340 340
341 pp->dev = dev; 341 pp->dev = dev;
342 342
343 dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 343 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
344 pp->dbi_base = devm_ioremap_resource(dev, dbi_base); 344 pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
345 if (IS_ERR(pp->dbi_base)) { 345 if (IS_ERR(pp->dbi_base)) {
346 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); 346 dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
new file mode 100644
index 000000000000..ccc496b33a97
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -0,0 +1,970 @@
1/*
2 * PCIe host controller driver for Xilinx AXI PCIe Bridge
3 *
4 * Copyright (c) 2012 - 2014 Xilinx, Inc.
5 *
6 * Based on the Tegra PCIe driver
7 *
8 * Bits taken from Synopsys Designware Host controller driver and
9 * ARM PCI Host generic driver.
10 *
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/irqdomain.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/msi.h>
23#include <linux/of_address.h>
24#include <linux/of_pci.h>
25#include <linux/of_platform.h>
26#include <linux/of_irq.h>
27#include <linux/pci.h>
28#include <linux/platform_device.h>
29
30/* Register definitions */
31#define XILINX_PCIE_REG_BIR 0x00000130
32#define XILINX_PCIE_REG_IDR 0x00000138
33#define XILINX_PCIE_REG_IMR 0x0000013c
34#define XILINX_PCIE_REG_PSCR 0x00000144
35#define XILINX_PCIE_REG_RPSC 0x00000148
36#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
37#define XILINX_PCIE_REG_MSIBASE2 0x00000150
38#define XILINX_PCIE_REG_RPEFR 0x00000154
39#define XILINX_PCIE_REG_RPIFR1 0x00000158
40#define XILINX_PCIE_REG_RPIFR2 0x0000015c
41
42/* Interrupt registers definitions */
43#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
44#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
45#define XILINX_PCIE_INTR_STR_ERR BIT(2)
46#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
47#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
48#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
49#define XILINX_PCIE_INTR_NONFATAL BIT(10)
50#define XILINX_PCIE_INTR_FATAL BIT(11)
51#define XILINX_PCIE_INTR_INTX BIT(16)
52#define XILINX_PCIE_INTR_MSI BIT(17)
53#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
54#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
55#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
56#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
57#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
58#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
59#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
60#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
61#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
62#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
63#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
64
65/* Root Port Error FIFO Read Register definitions */
66#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
67#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
68#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
69
70/* Root Port Interrupt FIFO Read Register 1 definitions */
71#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
72#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
73#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
74#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
75#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
76
77/* Bridge Info Register definitions */
78#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
79#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
80
81/* Root Port Interrupt FIFO Read Register 2 definitions */
82#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
83
84/* Root Port Status/control Register definitions */
85#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
86
87/* Phy Status/Control Register definitions */
88#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
89
90/* ECAM definitions */
91#define ECAM_BUS_NUM_SHIFT 20
92#define ECAM_DEV_NUM_SHIFT 12
93
94/* Number of MSI IRQs */
95#define XILINX_NUM_MSI_IRQS 128
96
97/* Number of Memory Resources */
98#define XILINX_MAX_NUM_RESOURCES 3
99
100/**
101 * struct xilinx_pcie_port - PCIe port information
102 * @reg_base: IO Mapped Register Base
103 * @irq: Interrupt number
104 * @msi_pages: MSI pages
105 * @root_busno: Root Bus number
106 * @dev: Device pointer
107 * @irq_domain: IRQ domain pointer
108 * @bus_range: Bus range
109 * @resources: Bus Resources
110 */
111struct xilinx_pcie_port {
112 void __iomem *reg_base;
113 u32 irq;
114 unsigned long msi_pages;
115 u8 root_busno;
116 struct device *dev;
117 struct irq_domain *irq_domain;
118 struct resource bus_range;
119 struct list_head resources;
120};
121
122static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
123
124static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
125{
126 return sys->private_data;
127}
128
129static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
130{
131 return readl(port->reg_base + reg);
132}
133
134static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
135{
136 writel(val, port->reg_base + reg);
137}
138
139static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
140{
141 return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
142 XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
143}
144
145/**
146 * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
147 * @port: PCIe port information
148 */
149static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
150{
151 u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
152
153 if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
154 dev_dbg(port->dev, "Requester ID %d\n",
155 val & XILINX_PCIE_RPEFR_REQ_ID);
156 pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
157 XILINX_PCIE_REG_RPEFR);
158 }
159}
160
161/**
162 * xilinx_pcie_valid_device - Check if a valid device is present on bus
163 * @bus: PCI Bus structure
164 * @devfn: device/function
165 *
166 * Return: 'true' if a valid device is present on the bus, 'false' otherwise
167 */
168static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
169{
170 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
171
172 /* Check if link is up when trying to access downstream ports */
173 if (bus->number != port->root_busno)
174 if (!xilinx_pcie_link_is_up(port))
175 return false;
176
177 /* Only one device down on each root port */
178 if (bus->number == port->root_busno && devfn > 0)
179 return false;
180
181 /*
182 * Do not read more than one device on the bus directly attached
183 * to RC.
184 */
185 if (bus->primary == port->root_busno && devfn > 0)
186 return false;
187
188 return true;
189}
190
191/**
192 * xilinx_pcie_config_base - Get configuration base
193 * @bus: PCI Bus structure
194 * @devfn: Device/function
195 * @where: Offset from base
196 *
197 * Return: Base address of the configuration space needed to be
198 * accessed.
199 */
200static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
201 unsigned int devfn, int where)
202{
203 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
204 int relbus;
205
206 relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
207 (devfn << ECAM_DEV_NUM_SHIFT);
208
209 return port->reg_base + relbus + where;
210}
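 /*
  * Illustrative helper, not part of the original file: with the shifts
  * above, config space for bus 1, devfn 0, register PCI_VENDOR_ID sits
  * (1 << 20) | (0 << 12) | 0x0 == 0x100000 bytes past reg_base.
  */
 static inline u32 xilinx_ecam_offset(u8 bus, u8 devfn, u16 where)
 {
 	return ((u32)bus << ECAM_BUS_NUM_SHIFT) |
 	       ((u32)devfn << ECAM_DEV_NUM_SHIFT) | where;
 }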
211
212/**
213 * xilinx_pcie_read_config - Read configuration space
214 * @bus: PCI Bus structure
215 * @devfn: Device/function
216 * @where: Offset from base
217 * @size: Byte/word/dword
218 * @val: Value to be read
219 *
220 * Return: PCIBIOS_SUCCESSFUL on success
221 * PCIBIOS_DEVICE_NOT_FOUND on failure
222 */
223static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
224 int where, int size, u32 *val)
225{
226 void __iomem *addr;
227
228 if (!xilinx_pcie_valid_device(bus, devfn)) {
229 *val = 0xFFFFFFFF;
230 return PCIBIOS_DEVICE_NOT_FOUND;
231 }
232
233 addr = xilinx_pcie_config_base(bus, devfn, where);
234
235 switch (size) {
236 case 1:
237 *val = readb(addr);
238 break;
239 case 2:
240 *val = readw(addr);
241 break;
242 default:
243 *val = readl(addr);
244 break;
245 }
246
247 return PCIBIOS_SUCCESSFUL;
248}
249
250/**
251 * xilinx_pcie_write_config - Write configuration space
252 * @bus: PCI Bus structure
253 * @devfn: Device/function
254 * @where: Offset from base
255 * @size: Byte/word/dword
256 * @val: Value to be written to device
257 *
258 * Return: PCIBIOS_SUCCESSFUL on success
259 * PCIBIOS_DEVICE_NOT_FOUND on failure
260 */
261static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
262 int where, int size, u32 val)
263{
264 void __iomem *addr;
265
266 if (!xilinx_pcie_valid_device(bus, devfn))
267 return PCIBIOS_DEVICE_NOT_FOUND;
268
269 addr = xilinx_pcie_config_base(bus, devfn, where);
270
271 switch (size) {
272 case 1:
273 writeb(val, addr);
274 break;
275 case 2:
276 writew(val, addr);
277 break;
278 default:
279 writel(val, addr);
280 break;
281 }
282
283 return PCIBIOS_SUCCESSFUL;
284}
285
286/* PCIe operations */
287static struct pci_ops xilinx_pcie_ops = {
288 .read = xilinx_pcie_read_config,
289 .write = xilinx_pcie_write_config,
290};
291
292/* MSI functions */
293
294/**
295 * xilinx_pcie_destroy_msi - Free MSI number
296 * @irq: IRQ to be freed
297 */
298static void xilinx_pcie_destroy_msi(unsigned int irq)
299{
300 struct irq_desc *desc;
301 struct msi_desc *msi;
302 struct xilinx_pcie_port *port;
 irq_hw_number_t hwirq;
303
304 desc = irq_to_desc(irq);
305 msi = irq_desc_get_msi_desc(desc);
306 port = sys_to_pcie(msi->dev->bus->sysdata);
 /* the bitmap tracks hwirqs; translate from the Linux virq */
 hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
307
308 if (!test_bit(hwirq, msi_irq_in_use))
309 dev_err(port->dev, "Trying to free unused MSI#%lu\n", hwirq);
310 else
311 clear_bit(hwirq, msi_irq_in_use);
312}
313
314/**
315 * xilinx_pcie_assign_msi - Allocate MSI number
316 * @port: PCIe port structure
317 *
318 * Return: A free MSI hwirq number on success and -ENOSPC on failure.
319 */
320static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
321{
322 int pos;
323
324 pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
325 if (pos < XILINX_NUM_MSI_IRQS)
326 set_bit(pos, msi_irq_in_use);
327 else
328 return -ENOSPC;
329
330 return pos;
331}
332
333/**
334 * xilinx_msi_teardown_irq - Destroy the MSI
335 * @chip: MSI Chip descriptor
336 * @irq: MSI IRQ to destroy
337 */
338static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
339{
340 xilinx_pcie_destroy_msi(irq);
341}
342
343/**
344 * xilinx_pcie_msi_setup_irq - Setup MSI request
345 * @chip: MSI chip pointer
346 * @pdev: PCIe device pointer
347 * @desc: MSI descriptor pointer
348 *
349 * Return: '0' on success and error value on failure
350 */
351static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
352 struct pci_dev *pdev,
353 struct msi_desc *desc)
354{
355 struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
356 unsigned int irq;
357 int hwirq;
358 struct msi_msg msg;
359 phys_addr_t msg_addr;
360
361 hwirq = xilinx_pcie_assign_msi(port);
362 if (hwirq < 0)
363 return hwirq;
364
365 irq = irq_create_mapping(port->irq_domain, hwirq);
366 if (!irq) {
 /* undo xilinx_pcie_assign_msi(); no mapping exists to dispose */
 clear_bit(hwirq, msi_irq_in_use);
367 return -EINVAL;
 }
368
369 irq_set_msi_desc(irq, desc);
370
371 msg_addr = virt_to_phys((void *)port->msi_pages);
372
373 msg.address_hi = 0;
374 msg.address_lo = msg_addr;
375 msg.data = irq;
376
377 write_msi_msg(irq, &msg);
378
379 return 0;
380}
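 /*
  * Illustrative only: for a hwirq mapped to, say, virq 45, the code above
  * composes roughly
  *
  *	msg.address_hi = 0x0;
  *	msg.address_lo = virt_to_phys(port->msi_pages);
  *	msg.data       = 45;	(the Linux virq)
  *
  * The bridge latches the 16-bit payload into RPIFR2 on receipt, so the
  * interrupt handler can hand it straight to generic_handle_irq().
  */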
381
382/* MSI Chip Descriptor */
383static struct msi_chip xilinx_pcie_msi_chip = {
384 .setup_irq = xilinx_pcie_msi_setup_irq,
385 .teardown_irq = xilinx_msi_teardown_irq,
386};
387
388/* HW Interrupt Chip Descriptor */
389static struct irq_chip xilinx_msi_irq_chip = {
390 .name = "Xilinx PCIe MSI",
391 .irq_enable = unmask_msi_irq,
392 .irq_disable = mask_msi_irq,
393 .irq_mask = mask_msi_irq,
394 .irq_unmask = unmask_msi_irq,
395};
396
397/**
398 * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
399 * @domain: IRQ domain
400 * @irq: Virtual IRQ number
401 * @hwirq: HW interrupt number
402 *
403 * Return: Always returns 0.
404 */
405static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
406 irq_hw_number_t hwirq)
407{
408 irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
409 irq_set_chip_data(irq, domain->host_data);
410 set_irq_flags(irq, IRQF_VALID);
411
412 return 0;
413}
414
415/* IRQ Domain operations */
416static const struct irq_domain_ops msi_domain_ops = {
417 .map = xilinx_pcie_msi_map,
418};
419
420/**
421 * xilinx_pcie_enable_msi - Enable MSI support
422 * @port: PCIe port information
423 */
424static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
425{
426 phys_addr_t msg_addr;
427
428 port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
429 msg_addr = virt_to_phys((void *)port->msi_pages);
430 pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
431 pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
432}
433
434/**
435 * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
436 * @bus: PCIe bus
437 */
438static void xilinx_pcie_add_bus(struct pci_bus *bus)
439{
440 if (IS_ENABLED(CONFIG_PCI_MSI)) {
441 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
442
443 xilinx_pcie_msi_chip.dev = port->dev;
444 bus->msi = &xilinx_pcie_msi_chip;
445 }
446}
447
448/* INTx Functions */
449
450/**
451 * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
452 * @domain: IRQ domain
453 * @irq: Virtual IRQ number
454 * @hwirq: HW interrupt number
455 *
456 * Return: Always returns 0.
457 */
458static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
459 irq_hw_number_t hwirq)
460{
461 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
462 irq_set_chip_data(irq, domain->host_data);
463 set_irq_flags(irq, IRQF_VALID);
464
465 return 0;
466}
467
468/* INTx IRQ Domain operations */
469static const struct irq_domain_ops intx_domain_ops = {
470 .map = xilinx_pcie_intx_map,
471};
472
473/* PCIe HW Functions */
474
475/**
476 * xilinx_pcie_intr_handler - Interrupt Service Handler
477 * @irq: IRQ number
478 * @data: PCIe port information
479 *
480 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
481 */
482static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
483{
484 struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
485 u32 val, mask, status, msi_data;
486
487 /* Read interrupt decode and mask registers */
488 val = pcie_read(port, XILINX_PCIE_REG_IDR);
489 mask = pcie_read(port, XILINX_PCIE_REG_IMR);
490
491 status = val & mask;
492 if (!status)
493 return IRQ_NONE;
494
495 if (status & XILINX_PCIE_INTR_LINK_DOWN)
496 dev_warn(port->dev, "Link Down\n");
497
498 if (status & XILINX_PCIE_INTR_ECRC_ERR)
499 dev_warn(port->dev, "ECRC failed\n");
500
501 if (status & XILINX_PCIE_INTR_STR_ERR)
502 dev_warn(port->dev, "Streaming error\n");
503
504 if (status & XILINX_PCIE_INTR_HOT_RESET)
505 dev_info(port->dev, "Hot reset\n");
506
507 if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
508 dev_warn(port->dev, "ECAM access timeout\n");
509
510 if (status & XILINX_PCIE_INTR_CORRECTABLE) {
511 dev_warn(port->dev, "Correctable error message\n");
512 xilinx_pcie_clear_err_interrupts(port);
513 }
514
515 if (status & XILINX_PCIE_INTR_NONFATAL) {
516 dev_warn(port->dev, "Non-fatal error message\n");
517 xilinx_pcie_clear_err_interrupts(port);
518 }
519
520 if (status & XILINX_PCIE_INTR_FATAL) {
521 dev_warn(port->dev, "Fatal error message\n");
522 xilinx_pcie_clear_err_interrupts(port);
523 }
524
525 if (status & XILINX_PCIE_INTR_INTX) {
526 /* INTx interrupt received */
527 val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
528
529 /* Check whether interrupt valid */
530 if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
531 dev_warn(port->dev, "RP Intr FIFO1 read error\n");
532 return IRQ_HANDLED;
533 }
534
535 /* Clear interrupt FIFO register 1 */
536 pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
537 XILINX_PCIE_REG_RPIFR1);
538
539 /* Handle INTx Interrupt */
540 val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
541 XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
542 generic_handle_irq(irq_find_mapping(port->irq_domain, val));
543 }
544
545 if (status & XILINX_PCIE_INTR_MSI) {
546 /* MSI Interrupt */
547 val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
548
549 if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
550 dev_warn(port->dev, "RP Intr FIFO1 read error\n");
551 return IRQ_HANDLED;
552 }
553
554 if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
555 msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
556 XILINX_PCIE_RPIFR2_MSG_DATA;
557
558 /* Clear interrupt FIFO register 1 */
559 pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
560 XILINX_PCIE_REG_RPIFR1);
561
562 if (IS_ENABLED(CONFIG_PCI_MSI)) {
563 /* Handle MSI Interrupt */
564 generic_handle_irq(msi_data);
565 }
566 }
567 }
568
569 if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
570 dev_warn(port->dev, "Slave unsupported request\n");
571
572 if (status & XILINX_PCIE_INTR_SLV_UNEXP)
573 dev_warn(port->dev, "Slave unexpected completion\n");
574
575 if (status & XILINX_PCIE_INTR_SLV_COMPL)
576 dev_warn(port->dev, "Slave completion timeout\n");
577
578 if (status & XILINX_PCIE_INTR_SLV_ERRP)
579 dev_warn(port->dev, "Slave Error Poison\n");
580
581 if (status & XILINX_PCIE_INTR_SLV_CMPABT)
582 dev_warn(port->dev, "Slave Completer Abort\n");
583
584 if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
585 dev_warn(port->dev, "Slave Illegal Burst\n");
586
587 if (status & XILINX_PCIE_INTR_MST_DECERR)
588 dev_warn(port->dev, "Master decode error\n");
589
590 if (status & XILINX_PCIE_INTR_MST_SLVERR)
591 dev_warn(port->dev, "Master slave error\n");
592
593 if (status & XILINX_PCIE_INTR_MST_ERRP)
594 dev_warn(port->dev, "Master error poison\n");
595
596 /* Clear the Interrupt Decode register */
597 pcie_write(port, status, XILINX_PCIE_REG_IDR);
598
599 return IRQ_HANDLED;
600}
601
602/**
603 * xilinx_pcie_free_irq_domain - Free IRQ domain
604 * @port: PCIe port information
605 */
606static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
607{
608 int i;
609 u32 irq, num_irqs;
610
611 /* Free IRQ Domain */
612 if (IS_ENABLED(CONFIG_PCI_MSI)) {
613
614 free_pages(port->msi_pages, 0);
615
616 num_irqs = XILINX_NUM_MSI_IRQS;
617 } else {
618 /* INTx */
619 num_irqs = 4;
620 }
621
622 for (i = 0; i < num_irqs; i++) {
623 irq = irq_find_mapping(port->irq_domain, i);
624 if (irq > 0)
625 irq_dispose_mapping(irq);
626 }
627
628 irq_domain_remove(port->irq_domain);
629}
630
631/**
632 * xilinx_pcie_init_irq_domain - Initialize IRQ domain
633 * @port: PCIe port information
634 *
635 * Return: '0' on success and error value on failure
636 */
637static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
638{
639 struct device *dev = port->dev;
640 struct device_node *node = dev->of_node;
641 struct device_node *pcie_intc_node;
642
643 /* Setup INTx */
644 pcie_intc_node = of_get_next_child(node, NULL);
645 if (!pcie_intc_node) {
646 dev_err(dev, "No PCIe Intc node found\n");
 /* of_get_next_child() returns NULL on failure, not an ERR_PTR */
647 return -ENODEV;
648 }
649
650 port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
651 &intx_domain_ops,
652 port);
653 if (!port->irq_domain) {
654 dev_err(dev, "Failed to get an INTx IRQ domain\n");
 /* irq_domain_add_linear() returns NULL on failure */
655 return -ENODEV;
656 }
657
658 /* Setup MSI */
659 if (IS_ENABLED(CONFIG_PCI_MSI)) {
660 port->irq_domain = irq_domain_add_linear(node,
661 XILINX_NUM_MSI_IRQS,
662 &msi_domain_ops,
663 &xilinx_pcie_msi_chip);
664 if (!port->irq_domain) {
665 dev_err(dev, "Failed to get an MSI IRQ domain\n");
666 return -ENODEV;
667 }
668
669 xilinx_pcie_enable_msi(port);
670 }
671
672 return 0;
673}
674
675/**
676 * xilinx_pcie_init_port - Initialize hardware
677 * @port: PCIe port information
678 */
679static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
680{
681 if (xilinx_pcie_link_is_up(port))
682 dev_info(port->dev, "PCIe Link is UP\n");
683 else
684 dev_info(port->dev, "PCIe Link is DOWN\n");
685
686 /* Disable all interrupts */
687 pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
688 XILINX_PCIE_REG_IMR);
689
690 /* Clear pending interrupts */
691 pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
692 XILINX_PCIE_IMR_ALL_MASK,
693 XILINX_PCIE_REG_IDR);
694
695 /* Enable all interrupts */
696 pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
697
698 /* Enable the Bridge enable bit */
699 pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
700 XILINX_PCIE_REG_RPSC_BEN,
701 XILINX_PCIE_REG_RPSC);
702}
703
704/**
705 * xilinx_pcie_setup - Setup memory resources
706 * @nr: Bus number
707 * @sys: Per controller structure
708 *
709 * Return: Always returns '1' to indicate success
710 */
711static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
712{
713 struct xilinx_pcie_port *port = sys_to_pcie(sys);
714
715 list_splice_init(&port->resources, &sys->resources);
716
717 return 1;
718}
719
720/**
721 * xilinx_pcie_scan_bus - Scan PCIe bus for devices
722 * @nr: Bus number
723 * @sys: Per controller structure
724 *
725 * Return: Valid Bus pointer on success and NULL on failure
726 */
727static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
728{
729 struct xilinx_pcie_port *port = sys_to_pcie(sys);
730 struct pci_bus *bus;
731
732 port->root_busno = sys->busnr;
733 bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
734 sys, &sys->resources);
735
736 return bus;
737}
738
739/**
740 * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
741 * @port: PCIe port information
742 *
743 * Return: '0' on success and error value on failure
744 */
745static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
746{
747 struct device *dev = port->dev;
748 struct device_node *node = dev->of_node;
749 struct resource *mem;
750 resource_size_t offset;
751 struct of_pci_range_parser parser;
752 struct of_pci_range range;
753 struct pci_host_bridge_window *win;
754 int err = 0, mem_resno = 0;
755
756 /* Get the ranges */
757 if (of_pci_range_parser_init(&parser, node)) {
758 dev_err(dev, "missing \"ranges\" property\n");
759 return -EINVAL;
760 }
761
762 /* Parse the ranges and add the resources found to the list */
763 for_each_of_pci_range(&parser, &range) {
764
765 if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
766 dev_err(dev, "Maximum memory resources exceeded\n");
767 return -EINVAL;
768 }
769
770 mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
771 if (!mem) {
772 err = -ENOMEM;
773 goto free_resources;
774 }
775
776 of_pci_range_to_resource(&range, node, mem);
777
778 switch (mem->flags & IORESOURCE_TYPE_BITS) {
779 case IORESOURCE_MEM:
780 offset = range.cpu_addr - range.pci_addr;
781 mem_resno++;
782 break;
783 default:
784 err = -EINVAL;
785 break;
786 }
787
788 if (err < 0) {
789 dev_warn(dev, "Invalid resource found %pR\n", mem);
790 continue;
791 }
792
793 err = request_resource(&iomem_resource, mem);
794 if (err)
795 goto free_resources;
796
797 pci_add_resource_offset(&port->resources, mem, offset);
798 }
799
800 /* Get the bus range */
801 if (of_pci_parse_bus_range(node, &port->bus_range)) {
802 u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
803 u8 last;
804
805 last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
806 XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
807
808 port->bus_range = (struct resource) {
809 .name = node->name,
810 .start = 0,
811 .end = last,
812 .flags = IORESOURCE_BUS,
813 };
814 }
815
816 /* Register bus resource */
817 pci_add_resource(&port->resources, &port->bus_range);
818
819 return 0;
820
821free_resources:
822 release_child_resources(&iomem_resource);
823 list_for_each_entry(win, &port->resources, list)
824 devm_kfree(dev, win->res);
825 pci_free_resource_list(&port->resources);
826
827 return err;
828}
829
830/**
831 * xilinx_pcie_parse_dt - Parse Device tree
832 * @port: PCIe port information
833 *
834 * Return: '0' on success and error value on failure
835 */
836static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
837{
838 struct device *dev = port->dev;
839 struct device_node *node = dev->of_node;
840 struct resource regs;
841 const char *type;
842 int err;
843
844 type = of_get_property(node, "device_type", NULL);
845 if (!type || strcmp(type, "pci")) {
846 dev_err(dev, "invalid \"device_type\" %s\n", type);
847 return -EINVAL;
848 }
849
850 err = of_address_to_resource(node, 0, &regs);
851 if (err) {
852 dev_err(dev, "missing \"reg\" property\n");
853 return err;
854 }
855
856 port->reg_base = devm_ioremap_resource(dev, &regs);
857 if (IS_ERR(port->reg_base))
858 return PTR_ERR(port->reg_base);
859
860 port->irq = irq_of_parse_and_map(node, 0);
861 err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
862 IRQF_SHARED, "xilinx-pcie", port);
863 if (err) {
864 dev_err(dev, "unable to request irq %d\n", port->irq);
865 return err;
866 }
867
868 return 0;
869}
870
871/**
872 * xilinx_pcie_probe - Probe function
873 * @pdev: Platform device pointer
874 *
875 * Return: '0' on success and error value on failure
876 */
877static int xilinx_pcie_probe(struct platform_device *pdev)
878{
879 struct xilinx_pcie_port *port;
880 struct hw_pci hw;
881 struct device *dev = &pdev->dev;
882 int err;
883
884 if (!dev->of_node)
885 return -ENODEV;
886
887 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
888 if (!port)
889 return -ENOMEM;
890
891 port->dev = dev;
892
893 err = xilinx_pcie_parse_dt(port);
894 if (err) {
895 dev_err(dev, "Parsing DT failed\n");
896 return err;
897 }
898
899 xilinx_pcie_init_port(port);
900
901 err = xilinx_pcie_init_irq_domain(port);
902 if (err) {
903 dev_err(dev, "Failed creating IRQ Domain\n");
904 return err;
905 }
906
907 /*
908 * Parse PCI ranges, configuration bus range and
909 * request their resources
910 */
911 INIT_LIST_HEAD(&port->resources);
912 err = xilinx_pcie_parse_and_add_res(port);
913 if (err) {
914 dev_err(dev, "Failed adding resources\n");
915 return err;
916 }
917
918 platform_set_drvdata(pdev, port);
919
920 /* Register the device */
921 memset(&hw, 0, sizeof(hw));
922 hw = (struct hw_pci) {
923 .nr_controllers = 1,
924 .private_data = (void **)&port,
925 .setup = xilinx_pcie_setup,
926 .map_irq = of_irq_parse_and_map_pci,
927 .add_bus = xilinx_pcie_add_bus,
928 .scan = xilinx_pcie_scan_bus,
929 .ops = &xilinx_pcie_ops,
930 };
931 pci_common_init_dev(dev, &hw);
932
933 return 0;
934}
935
936/**
937 * xilinx_pcie_remove - Remove function
938 * @pdev: Platform device pointer
939 *
940 * Return: '0' always
941 */
942static int xilinx_pcie_remove(struct platform_device *pdev)
943{
944 struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
945
946 xilinx_pcie_free_irq_domain(port);
947
948 return 0;
949}
950
951static const struct of_device_id xilinx_pcie_of_match[] = {
952 { .compatible = "xlnx,axi-pcie-host-1.00.a", },
953 {}
954};
955
956static struct platform_driver xilinx_pcie_driver = {
957 .driver = {
958 .name = "xilinx-pcie",
959 .owner = THIS_MODULE,
960 .of_match_table = xilinx_pcie_of_match,
961 .suppress_bind_attrs = true,
962 },
963 .probe = xilinx_pcie_probe,
964 .remove = xilinx_pcie_remove,
965};
966module_platform_driver(xilinx_pcie_driver);
967
968MODULE_AUTHOR("Xilinx Inc");
969MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
970MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3e6532b945c1..4a9aa08b08f1 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
24 24
25obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o 25obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
26 26
27pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o 27pci_hotplug-objs := pci_hotplug_core.o
28 28
29ifdef CONFIG_HOTPLUG_PCI_CPCI 29ifdef CONFIG_HOTPLUG_PCI_CPCI
30pci_hotplug-objs += cpci_hotplug_core.o \ 30pci_hotplug-objs += cpci_hotplug_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index a94d850ae228..876ccc620440 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -46,215 +46,6 @@
46 46
47static bool debug_acpi; 47static bool debug_acpi;
48 48
49static acpi_status
50decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
51{
52 int i;
53 union acpi_object *fields = record->package.elements;
54 u32 revision = fields[1].integer.value;
55
56 switch (revision) {
57 case 1:
58 if (record->package.count != 6)
59 return AE_ERROR;
60 for (i = 2; i < 6; i++)
61 if (fields[i].type != ACPI_TYPE_INTEGER)
62 return AE_ERROR;
63 hpx->t0 = &hpx->type0_data;
64 hpx->t0->revision = revision;
65 hpx->t0->cache_line_size = fields[2].integer.value;
66 hpx->t0->latency_timer = fields[3].integer.value;
67 hpx->t0->enable_serr = fields[4].integer.value;
68 hpx->t0->enable_perr = fields[5].integer.value;
69 break;
70 default:
71 printk(KERN_WARNING
72 "%s: Type 0 Revision %d record not supported\n",
73 __func__, revision);
74 return AE_ERROR;
75 }
76 return AE_OK;
77}
78
79static acpi_status
80decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
81{
82 int i;
83 union acpi_object *fields = record->package.elements;
84 u32 revision = fields[1].integer.value;
85
86 switch (revision) {
87 case 1:
88 if (record->package.count != 5)
89 return AE_ERROR;
90 for (i = 2; i < 5; i++)
91 if (fields[i].type != ACPI_TYPE_INTEGER)
92 return AE_ERROR;
93 hpx->t1 = &hpx->type1_data;
94 hpx->t1->revision = revision;
95 hpx->t1->max_mem_read = fields[2].integer.value;
96 hpx->t1->avg_max_split = fields[3].integer.value;
97 hpx->t1->tot_max_split = fields[4].integer.value;
98 break;
99 default:
100 printk(KERN_WARNING
101 "%s: Type 1 Revision %d record not supported\n",
102 __func__, revision);
103 return AE_ERROR;
104 }
105 return AE_OK;
106}
107
108static acpi_status
109decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
110{
111 int i;
112 union acpi_object *fields = record->package.elements;
113 u32 revision = fields[1].integer.value;
114
115 switch (revision) {
116 case 1:
117 if (record->package.count != 18)
118 return AE_ERROR;
119 for (i = 2; i < 18; i++)
120 if (fields[i].type != ACPI_TYPE_INTEGER)
121 return AE_ERROR;
122 hpx->t2 = &hpx->type2_data;
123 hpx->t2->revision = revision;
124 hpx->t2->unc_err_mask_and = fields[2].integer.value;
125 hpx->t2->unc_err_mask_or = fields[3].integer.value;
126 hpx->t2->unc_err_sever_and = fields[4].integer.value;
127 hpx->t2->unc_err_sever_or = fields[5].integer.value;
128 hpx->t2->cor_err_mask_and = fields[6].integer.value;
129 hpx->t2->cor_err_mask_or = fields[7].integer.value;
130 hpx->t2->adv_err_cap_and = fields[8].integer.value;
131 hpx->t2->adv_err_cap_or = fields[9].integer.value;
132 hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
133 hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
134 hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
135 hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
136 hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
137 hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
138 hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
139 hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
140 break;
141 default:
142 printk(KERN_WARNING
143 "%s: Type 2 Revision %d record not supported\n",
144 __func__, revision);
145 return AE_ERROR;
146 }
147 return AE_OK;
148}
149
150static acpi_status
151acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
152{
153 acpi_status status;
154 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
155 union acpi_object *package, *record, *fields;
156 u32 type;
157 int i;
158
159 /* Clear the return buffer with zeros */
160 memset(hpx, 0, sizeof(struct hotplug_params));
161
162 status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
163 if (ACPI_FAILURE(status))
164 return status;
165
166 package = (union acpi_object *)buffer.pointer;
167 if (package->type != ACPI_TYPE_PACKAGE) {
168 status = AE_ERROR;
169 goto exit;
170 }
171
172 for (i = 0; i < package->package.count; i++) {
173 record = &package->package.elements[i];
174 if (record->type != ACPI_TYPE_PACKAGE) {
175 status = AE_ERROR;
176 goto exit;
177 }
178
179 fields = record->package.elements;
180 if (fields[0].type != ACPI_TYPE_INTEGER ||
181 fields[1].type != ACPI_TYPE_INTEGER) {
182 status = AE_ERROR;
183 goto exit;
184 }
185
186 type = fields[0].integer.value;
187 switch (type) {
188 case 0:
189 status = decode_type0_hpx_record(record, hpx);
190 if (ACPI_FAILURE(status))
191 goto exit;
192 break;
193 case 1:
194 status = decode_type1_hpx_record(record, hpx);
195 if (ACPI_FAILURE(status))
196 goto exit;
197 break;
198 case 2:
199 status = decode_type2_hpx_record(record, hpx);
200 if (ACPI_FAILURE(status))
201 goto exit;
202 break;
203 default:
204 printk(KERN_ERR "%s: Type %d record not supported\n",
205 __func__, type);
206 status = AE_ERROR;
207 goto exit;
208 }
209 }
210 exit:
211 kfree(buffer.pointer);
212 return status;
213}
214
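As decoded above, a Type 0 _HPX record is a package of exactly six integers: record type, revision (only 1 is accepted), cache line size, latency timer, and the SERR/PERR enables. A minimal shape check mirroring that contract (a sketch; hpx_type0_ok is not an existing kernel function):

#include <linux/acpi.h>

/* Accept only "Type 0, Revision 1" records, i.e. a package of six
 * integers, matching decode_type0_hpx_record() above. */
static bool hpx_type0_ok(const union acpi_object *rec)
{
	const union acpi_object *f = rec->package.elements;
	int i;

	if (rec->type != ACPI_TYPE_PACKAGE || rec->package.count != 6)
		return false;
	for (i = 0; i < 6; i++)
		if (f[i].type != ACPI_TYPE_INTEGER)
			return false;
	return f[0].integer.value == 0 && f[1].integer.value == 1;
}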
215static acpi_status
216acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
217{
218 acpi_status status;
219 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
220 union acpi_object *package, *fields;
221 int i;
222
223 memset(hpp, 0, sizeof(struct hotplug_params));
224
225 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
226 if (ACPI_FAILURE(status))
227 return status;
228
229 package = (union acpi_object *) buffer.pointer;
230 if (package->type != ACPI_TYPE_PACKAGE ||
231 package->package.count != 4) {
232 status = AE_ERROR;
233 goto exit;
234 }
235
236 fields = package->package.elements;
237 for (i = 0; i < 4; i++) {
238 if (fields[i].type != ACPI_TYPE_INTEGER) {
239 status = AE_ERROR;
240 goto exit;
241 }
242 }
243
244 hpp->t0 = &hpp->type0_data;
245 hpp->t0->revision = 1;
246 hpp->t0->cache_line_size = fields[0].integer.value;
247 hpp->t0->latency_timer = fields[1].integer.value;
248 hpp->t0->enable_serr = fields[2].integer.value;
249 hpp->t0->enable_perr = fields[3].integer.value;
250
251exit:
252 kfree(buffer.pointer);
253 return status;
254}
255
256
257
258/* acpi_run_oshp - get control of hotplug from the firmware 49/* acpi_run_oshp - get control of hotplug from the firmware
259 * 50 *
260 * @handle - the handle of the hotplug controller. 51 * @handle - the handle of the hotplug controller.
@@ -283,48 +74,6 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
283 return status; 74 return status;
284} 75}
285 76
286/* pci_get_hp_params
287 *
288 * @dev - the pci_dev for which we want parameters
289 * @hpp - allocated by the caller
290 */
291int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
292{
293 acpi_status status;
294 acpi_handle handle, phandle;
295 struct pci_bus *pbus;
296
297 handle = NULL;
298 for (pbus = dev->bus; pbus; pbus = pbus->parent) {
299 handle = acpi_pci_get_bridge_handle(pbus);
300 if (handle)
301 break;
302 }
303
304 /*
305 * _HPP settings apply to all child buses, until another _HPP is
306 * encountered. If we don't find an _HPP for the input pci dev,
307 * look for it in the parent device scope since that would apply to
308 * this pci dev.
309 */
310 while (handle) {
311 status = acpi_run_hpx(handle, hpp);
312 if (ACPI_SUCCESS(status))
313 return 0;
314 status = acpi_run_hpp(handle, hpp);
315 if (ACPI_SUCCESS(status))
316 return 0;
317 if (acpi_is_root_bridge(handle))
318 break;
319 status = acpi_get_parent(handle, &phandle);
320 if (ACPI_FAILURE(status))
321 break;
322 handle = phandle;
323 }
324 return -ENODEV;
325}
326EXPORT_SYMBOL_GPL(pci_get_hp_params);
327
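The comment in the removed pci_get_hp_params() carries the key contract: _HPX is preferred over _HPP, and both are inherited from the nearest ancestor bridge that defines one. Condensed into a sketch, the walk above is:

	/* Walk from the device's bridge toward the root, taking the
	 * first _HPX (preferred) or _HPP found along the way. */
	while (handle) {
		acpi_handle parent;

		if (ACPI_SUCCESS(acpi_run_hpx(handle, hpp)) ||
		    ACPI_SUCCESS(acpi_run_hpp(handle, hpp)))
			return 0;		/* nearest scope wins */
		if (acpi_is_root_bridge(handle) ||
		    ACPI_FAILURE(acpi_get_parent(handle, &parent)))
			break;
		handle = parent;
	}
	return -ENODEV;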
328/** 77/**
329 * acpi_get_hp_hw_control_from_firmware 78 * acpi_get_hp_hw_control_from_firmware
330 * @dev: the pci_dev of the bridge that has a hotplug controller 79 * @dev: the pci_dev of the bridge that has a hotplug controller
@@ -433,7 +182,8 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
433{ 182{
434 acpi_handle bridge_handle, parent_handle; 183 acpi_handle bridge_handle, parent_handle;
435 184
436 if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) 185 bridge_handle = acpi_pci_get_bridge_handle(pbus);
186 if (!bridge_handle)
437 return 0; 187 return 0;
438 if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) 188 if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
439 return 0; 189 return 0;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 70741c8c46a0..a6f8e0ba0bfe 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,7 +61,6 @@ static DEFINE_MUTEX(bridge_mutex);
61static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type); 61static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
62static void acpiphp_post_dock_fixup(struct acpi_device *adev); 62static void acpiphp_post_dock_fixup(struct acpi_device *adev);
63static void acpiphp_sanitize_bus(struct pci_bus *bus); 63static void acpiphp_sanitize_bus(struct pci_bus *bus);
64static void acpiphp_set_hpp_values(struct pci_bus *bus);
65static void hotplug_event(u32 type, struct acpiphp_context *context); 64static void hotplug_event(u32 type, struct acpiphp_context *context);
66static void free_bridge(struct kref *kref); 65static void free_bridge(struct kref *kref);
67 66
@@ -510,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
510 __pci_bus_assign_resources(bus, &add_list, NULL); 509 __pci_bus_assign_resources(bus, &add_list, NULL);
511 510
512 acpiphp_sanitize_bus(bus); 511 acpiphp_sanitize_bus(bus);
513 acpiphp_set_hpp_values(bus); 512 pcie_bus_configure_settings(bus);
514 acpiphp_set_acpi_region(slot); 513 acpiphp_set_acpi_region(slot);
515 514
516 list_for_each_entry(dev, &bus->devices, bus_list) { 515 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -702,14 +701,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
702 } 701 }
703} 702}
704 703
705static void acpiphp_set_hpp_values(struct pci_bus *bus)
706{
707 struct pci_dev *dev;
708
709 list_for_each_entry(dev, &bus->devices, bus_list)
710 pci_configure_slot(dev);
711}
712
713/* 704/*
714 * Remove devices for which we could not assign resources, call 705 * Remove devices for which we could not assign resources, call
715 * arch specific code to fix-up the bus 706 * arch specific code to fix-up the bus
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8dcccffd6e21..6ca23998ee8f 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -302,7 +302,7 @@ static int ibm_get_table_from_acpi(char **bufp)
302 goto read_table_done; 302 goto read_table_done;
303 } 303 }
304 304
305 for(size = 0, i = 0; i < package->package.count; i++) { 305 for (size = 0, i = 0; i < package->package.count; i++) {
306 if (package->package.elements[i].type != ACPI_TYPE_BUFFER) { 306 if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
 307 pr_err("%s: Invalid ACPI element %d\n", __func__, i); 307 pr_err("%s: Invalid ACPI element %d\n", __func__, i);
308 goto read_table_done; 308 goto read_table_done;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index e09cf7827d68..a5a7fd8332ac 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -125,7 +125,8 @@ disable_slot(struct hotplug_slot *hotplug_slot)
125 125
126 /* Unconfigure device */ 126 /* Unconfigure device */
127 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot)); 127 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
128 if ((retval = cpci_unconfigure_slot(slot))) { 128 retval = cpci_unconfigure_slot(slot);
129 if (retval) {
129 err("%s - could not unconfigure slot %s", 130 err("%s - could not unconfigure slot %s",
130 __func__, slot_name(slot)); 131 __func__, slot_name(slot));
131 goto disable_error; 132 goto disable_error;
@@ -141,9 +142,11 @@ disable_slot(struct hotplug_slot *hotplug_slot)
141 } 142 }
142 cpci_led_on(slot); 143 cpci_led_on(slot);
143 144
144 if (controller->ops->set_power) 145 if (controller->ops->set_power) {
145 if ((retval = controller->ops->set_power(slot, 0))) 146 retval = controller->ops->set_power(slot, 0);
147 if (retval)
146 goto disable_error; 148 goto disable_error;
149 }
147 150
148 if (update_adapter_status(slot->hotplug_slot, 0)) 151 if (update_adapter_status(slot->hotplug_slot, 0))
149 warn("failure to update adapter file"); 152 warn("failure to update adapter file");
@@ -467,9 +470,9 @@ check_slots(void)
467 __func__, slot_name(slot), hs_csr); 470 __func__, slot_name(slot), hs_csr);
468 471
469 if (!slot->extracting) { 472 if (!slot->extracting) {
470 if (update_latch_status(slot->hotplug_slot, 0)) { 473 if (update_latch_status(slot->hotplug_slot, 0))
471 warn("failure to update latch file"); 474 warn("failure to update latch file");
472 } 475
473 slot->extracting = 1; 476 slot->extracting = 1;
474 atomic_inc(&extracting); 477 atomic_inc(&extracting);
475 } 478 }
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index 04fcd7811400..66b7bbebe493 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -56,7 +56,7 @@
56 if (debug) \ 56 if (debug) \
57 printk (KERN_DEBUG "%s: " format "\n", \ 57 printk (KERN_DEBUG "%s: " format "\n", \
58 MY_NAME , ## arg); \ 58 MY_NAME , ## arg); \
59 } while(0) 59 } while (0)
60#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 60#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
61#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 61#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
62#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) 62#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,28 +82,28 @@ static int __init validate_parameters(void)
82 char *p; 82 char *p;
83 unsigned long tmp; 83 unsigned long tmp;
84 84
85 if(!bridge) { 85 if (!bridge) {
86 info("not configured, disabling."); 86 info("not configured, disabling.");
87 return -EINVAL; 87 return -EINVAL;
88 } 88 }
89 str = bridge; 89 str = bridge;
90 if(!*str) 90 if (!*str)
91 return -EINVAL; 91 return -EINVAL;
92 92
93 tmp = simple_strtoul(str, &p, 16); 93 tmp = simple_strtoul(str, &p, 16);
94 if(p == str || tmp > 0xff) { 94 if (p == str || tmp > 0xff) {
95 err("Invalid hotplug bus bridge device bus number"); 95 err("Invalid hotplug bus bridge device bus number");
96 return -EINVAL; 96 return -EINVAL;
97 } 97 }
98 bridge_busnr = (u8) tmp; 98 bridge_busnr = (u8) tmp;
99 dbg("bridge_busnr = 0x%02x", bridge_busnr); 99 dbg("bridge_busnr = 0x%02x", bridge_busnr);
100 if(*p != ':') { 100 if (*p != ':') {
101 err("Invalid hotplug bus bridge device"); 101 err("Invalid hotplug bus bridge device");
102 return -EINVAL; 102 return -EINVAL;
103 } 103 }
104 str = p + 1; 104 str = p + 1;
105 tmp = simple_strtoul(str, &p, 16); 105 tmp = simple_strtoul(str, &p, 16);
106 if(p == str || tmp > 0x1f) { 106 if (p == str || tmp > 0x1f) {
107 err("Invalid hotplug bus bridge device slot number"); 107 err("Invalid hotplug bus bridge device slot number");
108 return -EINVAL; 108 return -EINVAL;
109 } 109 }
@@ -112,18 +112,18 @@ static int __init validate_parameters(void)
112 112
113 dbg("first_slot = 0x%02x", first_slot); 113 dbg("first_slot = 0x%02x", first_slot);
114 dbg("last_slot = 0x%02x", last_slot); 114 dbg("last_slot = 0x%02x", last_slot);
115 if(!(first_slot && last_slot)) { 115 if (!(first_slot && last_slot)) {
116 err("Need to specify first_slot and last_slot"); 116 err("Need to specify first_slot and last_slot");
117 return -EINVAL; 117 return -EINVAL;
118 } 118 }
119 if(last_slot < first_slot) { 119 if (last_slot < first_slot) {
120 err("first_slot must be less than last_slot"); 120 err("first_slot must be less than last_slot");
121 return -EINVAL; 121 return -EINVAL;
122 } 122 }
123 123
124 dbg("port = 0x%04x", port); 124 dbg("port = 0x%04x", port);
125 dbg("enum_bit = 0x%02x", enum_bit); 125 dbg("enum_bit = 0x%02x", enum_bit);
126 if(enum_bit > 7) { 126 if (enum_bit > 7) {
127 err("Invalid #ENUM bit"); 127 err("Invalid #ENUM bit");
128 return -EINVAL; 128 return -EINVAL;
129 } 129 }
@@ -151,12 +151,12 @@ static int __init cpcihp_generic_init(void)
151 return status; 151 return status;
152 152
153 r = request_region(port, 1, "#ENUM hotswap signal register"); 153 r = request_region(port, 1, "#ENUM hotswap signal register");
154 if(!r) 154 if (!r)
155 return -EBUSY; 155 return -EBUSY;
156 156
157 dev = pci_get_domain_bus_and_slot(0, bridge_busnr, 157 dev = pci_get_domain_bus_and_slot(0, bridge_busnr,
158 PCI_DEVFN(bridge_slot, 0)); 158 PCI_DEVFN(bridge_slot, 0));
159 if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { 159 if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
160 err("Invalid bridge device %s", bridge); 160 err("Invalid bridge device %s", bridge);
161 pci_dev_put(dev); 161 pci_dev_put(dev);
162 return -EINVAL; 162 return -EINVAL;
@@ -169,21 +169,21 @@ static int __init cpcihp_generic_init(void)
169 generic_hpc.ops = &generic_hpc_ops; 169 generic_hpc.ops = &generic_hpc_ops;
170 170
171 status = cpci_hp_register_controller(&generic_hpc); 171 status = cpci_hp_register_controller(&generic_hpc);
172 if(status != 0) { 172 if (status != 0) {
173 err("Could not register cPCI hotplug controller"); 173 err("Could not register cPCI hotplug controller");
174 return -ENODEV; 174 return -ENODEV;
175 } 175 }
176 dbg("registered controller"); 176 dbg("registered controller");
177 177
178 status = cpci_hp_register_bus(bus, first_slot, last_slot); 178 status = cpci_hp_register_bus(bus, first_slot, last_slot);
179 if(status != 0) { 179 if (status != 0) {
180 err("Could not register cPCI hotplug bus"); 180 err("Could not register cPCI hotplug bus");
181 goto init_bus_register_error; 181 goto init_bus_register_error;
182 } 182 }
183 dbg("registered bus"); 183 dbg("registered bus");
184 184
185 status = cpci_hp_start(); 185 status = cpci_hp_start();
186 if(status != 0) { 186 if (status != 0) {
 187 err("Could not start cPCI hotplug system"); 187 err("Could not start cPCI hotplug system");
188 goto init_start_error; 188 goto init_start_error;
189 } 189 }
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 6757b3ef7e10..7ecf34e76a61 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -51,7 +51,7 @@
51 if (debug) \ 51 if (debug) \
52 printk (KERN_DEBUG "%s: " format "\n", \ 52 printk (KERN_DEBUG "%s: " format "\n", \
53 MY_NAME , ## arg); \ 53 MY_NAME , ## arg); \
54 } while(0) 54 } while (0)
55#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) 55#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
56#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) 56#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
57#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg) 57#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,13 +82,13 @@ static int zt5550_hc_config(struct pci_dev *pdev)
82 int ret; 82 int ret;
83 83
84 /* Since we know that no boards exist with two HC chips, treat it as an error */ 84 /* Since we know that no boards exist with two HC chips, treat it as an error */
85 if(hc_dev) { 85 if (hc_dev) {
86 err("too many host controller devices?"); 86 err("too many host controller devices?");
87 return -EBUSY; 87 return -EBUSY;
88 } 88 }
89 89
90 ret = pci_enable_device(pdev); 90 ret = pci_enable_device(pdev);
91 if(ret) { 91 if (ret) {
92 err("cannot enable %s\n", pci_name(pdev)); 92 err("cannot enable %s\n", pci_name(pdev));
93 return ret; 93 return ret;
94 } 94 }
@@ -98,7 +98,7 @@ static int zt5550_hc_config(struct pci_dev *pdev)
98 dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1)); 98 dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1));
99 dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1)); 99 dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1));
100 100
101 if(!request_mem_region(pci_resource_start(hc_dev, 1), 101 if (!request_mem_region(pci_resource_start(hc_dev, 1),
102 pci_resource_len(hc_dev, 1), MY_NAME)) { 102 pci_resource_len(hc_dev, 1), MY_NAME)) {
103 err("cannot reserve MMIO region"); 103 err("cannot reserve MMIO region");
104 ret = -ENOMEM; 104 ret = -ENOMEM;
@@ -107,7 +107,7 @@ static int zt5550_hc_config(struct pci_dev *pdev)
107 107
108 hc_registers = 108 hc_registers =
109 ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); 109 ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1));
110 if(!hc_registers) { 110 if (!hc_registers) {
111 err("cannot remap MMIO region %llx @ %llx", 111 err("cannot remap MMIO region %llx @ %llx",
112 (unsigned long long)pci_resource_len(hc_dev, 1), 112 (unsigned long long)pci_resource_len(hc_dev, 1),
113 (unsigned long long)pci_resource_start(hc_dev, 1)); 113 (unsigned long long)pci_resource_start(hc_dev, 1));
@@ -146,7 +146,7 @@ exit_disable_device:
146 146
147static int zt5550_hc_cleanup(void) 147static int zt5550_hc_cleanup(void)
148{ 148{
149 if(!hc_dev) 149 if (!hc_dev)
150 return -ENODEV; 150 return -ENODEV;
151 151
152 iounmap(hc_registers); 152 iounmap(hc_registers);
@@ -170,9 +170,9 @@ static int zt5550_hc_check_irq(void *dev_id)
170 u8 reg; 170 u8 reg;
171 171
172 ret = 0; 172 ret = 0;
173 if(dev_id == zt5550_hpc.dev_id) { 173 if (dev_id == zt5550_hpc.dev_id) {
174 reg = readb(csr_int_status); 174 reg = readb(csr_int_status);
175 if(reg) 175 if (reg)
176 ret = 1; 176 ret = 1;
177 } 177 }
178 return ret; 178 return ret;
@@ -182,9 +182,9 @@ static int zt5550_hc_enable_irq(void)
182{ 182{
183 u8 reg; 183 u8 reg;
184 184
185 if(hc_dev == NULL) { 185 if (hc_dev == NULL)
186 return -ENODEV; 186 return -ENODEV;
187 } 187
188 reg = readb(csr_int_mask); 188 reg = readb(csr_int_mask);
189 reg = reg & ~ENUM_INT_MASK; 189 reg = reg & ~ENUM_INT_MASK;
190 writeb(reg, csr_int_mask); 190 writeb(reg, csr_int_mask);
@@ -195,9 +195,8 @@ static int zt5550_hc_disable_irq(void)
195{ 195{
196 u8 reg; 196 u8 reg;
197 197
198 if(hc_dev == NULL) { 198 if (hc_dev == NULL)
199 return -ENODEV; 199 return -ENODEV;
200 }
201 200
202 reg = readb(csr_int_mask); 201 reg = readb(csr_int_mask);
203 reg = reg | ENUM_INT_MASK; 202 reg = reg | ENUM_INT_MASK;
@@ -210,15 +209,15 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
210 int status; 209 int status;
211 210
212 status = zt5550_hc_config(pdev); 211 status = zt5550_hc_config(pdev);
213 if(status != 0) { 212 if (status != 0)
214 return status; 213 return status;
215 } 214
216 dbg("returned from zt5550_hc_config"); 215 dbg("returned from zt5550_hc_config");
217 216
218 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller)); 217 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
219 zt5550_hpc_ops.query_enum = zt5550_hc_query_enum; 218 zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
220 zt5550_hpc.ops = &zt5550_hpc_ops; 219 zt5550_hpc.ops = &zt5550_hpc_ops;
221 if(!poll) { 220 if (!poll) {
222 zt5550_hpc.irq = hc_dev->irq; 221 zt5550_hpc.irq = hc_dev->irq;
223 zt5550_hpc.irq_flags = IRQF_SHARED; 222 zt5550_hpc.irq_flags = IRQF_SHARED;
224 zt5550_hpc.dev_id = hc_dev; 223 zt5550_hpc.dev_id = hc_dev;
@@ -231,15 +230,16 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
231 } 230 }
232 231
233 status = cpci_hp_register_controller(&zt5550_hpc); 232 status = cpci_hp_register_controller(&zt5550_hpc);
234 if(status != 0) { 233 if (status != 0) {
235 err("could not register cPCI hotplug controller"); 234 err("could not register cPCI hotplug controller");
236 goto init_hc_error; 235 goto init_hc_error;
237 } 236 }
238 dbg("registered controller"); 237 dbg("registered controller");
239 238
240 /* Look for first device matching cPCI bus's bridge vendor and device IDs */ 239 /* Look for first device matching cPCI bus's bridge vendor and device IDs */
241 if(!(bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC, 240 bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
242 PCI_DEVICE_ID_DEC_21154, NULL))) { 241 PCI_DEVICE_ID_DEC_21154, NULL);
242 if (!bus0_dev) {
243 status = -ENODEV; 243 status = -ENODEV;
244 goto init_register_error; 244 goto init_register_error;
245 } 245 }
@@ -247,14 +247,14 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
247 pci_dev_put(bus0_dev); 247 pci_dev_put(bus0_dev);
248 248
249 status = cpci_hp_register_bus(bus0, 0x0a, 0x0f); 249 status = cpci_hp_register_bus(bus0, 0x0a, 0x0f);
250 if(status != 0) { 250 if (status != 0) {
251 err("could not register cPCI hotplug bus"); 251 err("could not register cPCI hotplug bus");
252 goto init_register_error; 252 goto init_register_error;
253 } 253 }
254 dbg("registered bus"); 254 dbg("registered bus");
255 255
256 status = cpci_hp_start(); 256 status = cpci_hp_start();
257 if(status != 0) { 257 if (status != 0) {
 258 err("could not start cPCI hotplug system"); 258 err("could not start cPCI hotplug system");
259 cpci_hp_unregister_bus(bus0); 259 cpci_hp_unregister_bus(bus0);
260 goto init_register_error; 260 goto init_register_error;
@@ -300,11 +300,11 @@ static int __init zt5550_init(void)
300 300
301 info(DRIVER_DESC " version: " DRIVER_VERSION); 301 info(DRIVER_DESC " version: " DRIVER_VERSION);
302 r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register"); 302 r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register");
303 if(!r) 303 if (!r)
304 return -EBUSY; 304 return -EBUSY;
305 305
306 rc = pci_register_driver(&zt5550_hc_driver); 306 rc = pci_register_driver(&zt5550_hc_driver);
307 if(rc < 0) 307 if (rc < 0)
308 release_region(ENUM_PORT, 1); 308 release_region(ENUM_PORT, 1);
309 return rc; 309 return rc;
310} 310}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 0450f405807d..b28b2d2184cd 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -690,7 +690,7 @@ static inline int cpq_get_latch_status(struct controller *ctrl,
690 690
691 status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)); 691 status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot));
692 692
693 return(status == 0) ? 1 : 0; 693 return (status == 0) ? 1 : 0;
694} 694}
695 695
696 696
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4aaee746df88..a53084ddc118 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -1096,9 +1096,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1096 1096
1097 /* initialize our threads if they haven't already been started up */ 1097 /* initialize our threads if they haven't already been started up */
1098 rc = one_time_init(); 1098 rc = one_time_init();
1099 if (rc) { 1099 if (rc)
1100 goto err_free_bus; 1100 goto err_free_bus;
1101 }
1102 1101
1103 dbg("pdev = %p\n", pdev); 1102 dbg("pdev = %p\n", pdev);
1104 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0)); 1103 dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index bde47fce3248..c5cbefee5236 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -705,9 +705,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
705 if (temp == max) { 705 if (temp == max) {
706 *head = max->next; 706 *head = max->next;
707 } else { 707 } else {
708 while (temp && temp->next != max) { 708 while (temp && temp->next != max)
709 temp = temp->next; 709 temp = temp->next;
710 }
711 710
712 if (temp) 711 if (temp)
713 temp->next = max->next; 712 temp->next = max->next;
@@ -903,9 +902,8 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
903 /* 902 /*
904 * Check to see if it was our interrupt 903 * Check to see if it was our interrupt
905 */ 904 */
906 if (!(misc & 0x000C)) { 905 if (!(misc & 0x000C))
907 return IRQ_NONE; 906 return IRQ_NONE;
908 }
909 907
910 if (misc & 0x0004) { 908 if (misc & 0x0004) {
911 /* 909 /*
@@ -1143,7 +1141,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1143 /* We don't allow freq/mode changes if we find another adapter running 1141 /* We don't allow freq/mode changes if we find another adapter running
1144 * in another slot on this controller 1142 * in another slot on this controller
1145 */ 1143 */
1146 for(slot = ctrl->slot; slot; slot = slot->next) { 1144 for (slot = ctrl->slot; slot; slot = slot->next) {
1147 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1145 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1148 continue; 1146 continue;
1149 if (!slot->hotplug_slot || !slot->hotplug_slot->info) 1147 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
@@ -1193,7 +1191,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1193 1191
1194 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); 1192 reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
1195 reg16 &= ~0x000F; 1193 reg16 &= ~0x000F;
1196 switch(adapter_speed) { 1194 switch (adapter_speed) {
1197 case(PCI_SPEED_133MHz_PCIX): 1195 case(PCI_SPEED_133MHz_PCIX):
1198 reg = 0x75; 1196 reg = 0x75;
1199 reg16 |= 0xB; 1197 reg16 |= 0xB;
@@ -2006,9 +2004,8 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
2006 /* Check to see if the interlock is closed */ 2004 /* Check to see if the interlock is closed */
2007 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); 2005 tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
2008 2006
2009 if (tempdword & (0x01 << hp_slot)) { 2007 if (tempdword & (0x01 << hp_slot))
2010 return 1; 2008 return 1;
2011 }
2012 2009
2013 if (func->is_a_board) { 2010 if (func->is_a_board) {
2014 rc = board_replaced(func, ctrl); 2011 rc = board_replaced(func, ctrl);
@@ -2070,9 +2067,8 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
2070 } 2067 }
2071 } 2068 }
2072 2069
2073 if (rc) { 2070 if (rc)
2074 dbg("%s: rc = %d\n", __func__, rc); 2071 dbg("%s: rc = %d\n", __func__, rc);
2075 }
2076 2072
2077 if (p_slot) 2073 if (p_slot)
2078 update_slot_info(ctrl, p_slot); 2074 update_slot_info(ctrl, p_slot);
@@ -2095,9 +2091,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
2095 device = func->device; 2091 device = func->device;
2096 func = cpqhp_slot_find(ctrl->bus, device, index++); 2092 func = cpqhp_slot_find(ctrl->bus, device, index++);
2097 p_slot = cpqhp_find_slot(ctrl, device); 2093 p_slot = cpqhp_find_slot(ctrl, device);
2098 if (p_slot) { 2094 if (p_slot)
2099 physical_slot = p_slot->number; 2095 physical_slot = p_slot->number;
2100 }
2101 2096
2102 /* Make sure there are no video controllers here */ 2097 /* Make sure there are no video controllers here */
2103 while (func && !rc) { 2098 while (func && !rc) {
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 0968a9bcb345..1e08ff8c229c 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -204,9 +204,8 @@ static int load_HRT (void __iomem *rom_start)
204 u8 temp_byte = 0xFF; 204 u8 temp_byte = 0xFF;
205 u32 rc; 205 u32 rc;
206 206
207 if (!check_for_compaq_ROM(rom_start)) { 207 if (!check_for_compaq_ROM(rom_start))
208 return -ENODEV; 208 return -ENODEV;
209 }
210 209
211 available = 1024; 210 available = 1024;
212 211
@@ -250,9 +249,8 @@ static u32 store_HRT (void __iomem *rom_start)
250 249
251 available = 1024; 250 available = 1024;
252 251
253 if (!check_for_compaq_ROM(rom_start)) { 252 if (!check_for_compaq_ROM(rom_start))
254 return(1); 253 return(1);
255 }
256 254
257 buffer = (u32*) evbuffer; 255 buffer = (u32*) evbuffer;
258 256
@@ -427,9 +425,9 @@ static u32 store_HRT (void __iomem *rom_start)
427 425
428void compaq_nvram_init (void __iomem *rom_start) 426void compaq_nvram_init (void __iomem *rom_start)
429{ 427{
430 if (rom_start) { 428 if (rom_start)
431 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); 429 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
432 } 430
433 dbg("int15 entry = %p\n", compaq_int15_entry_point); 431 dbg("int15 entry = %p\n", compaq_int15_entry_point);
434 432
435 /* initialize our int15 lock */ 433 /* initialize our int15 lock */
@@ -661,9 +659,8 @@ int compaq_nvram_store (void __iomem *rom_start)
661 659
662 if (evbuffer_init) { 660 if (evbuffer_init) {
663 rc = store_HRT(rom_start); 661 rc = store_HRT(rom_start);
664 if (rc) { 662 if (rc)
665 err(msg_unable_to_save); 663 err(msg_unable_to_save);
666 }
667 } 664 }
668 return rc; 665 return rc;
669} 666}
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index f7b8684a7739..3efaf4c38528 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1023,7 +1023,8 @@ static int enable_slot(struct hotplug_slot *hs)
1023 debug("ENABLING SLOT........\n"); 1023 debug("ENABLING SLOT........\n");
1024 slot_cur = hs->private; 1024 slot_cur = hs->private;
1025 1025
1026 if ((rc = validate(slot_cur, ENABLE))) { 1026 rc = validate(slot_cur, ENABLE);
1027 if (rc) {
1027 err("validate function failed\n"); 1028 err("validate function failed\n");
1028 goto error_nopower; 1029 goto error_nopower;
1029 } 1030 }
@@ -1199,9 +1200,8 @@ int ibmphp_do_disable_slot(struct slot *slot_cur)
1199 1200
1200 debug("DISABLING SLOT...\n"); 1201 debug("DISABLING SLOT...\n");
1201 1202
1202 if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { 1203 if ((slot_cur == NULL) || (slot_cur->ctrl == NULL))
1203 return -ENODEV; 1204 return -ENODEV;
1204 }
1205 1205
1206 flag = slot_cur->flag; 1206 flag = slot_cur->flag;
1207 slot_cur->flag = 1; 1207 slot_cur->flag = 1;
@@ -1336,17 +1336,20 @@ static int __init ibmphp_init(void)
1336 for (i = 0; i < 16; i++) 1336 for (i = 0; i < 16; i++)
1337 irqs[i] = 0; 1337 irqs[i] = 0;
1338 1338
1339 if ((rc = ibmphp_access_ebda())) 1339 rc = ibmphp_access_ebda();
1340 if (rc)
1340 goto error; 1341 goto error;
1341 debug("after ibmphp_access_ebda()\n"); 1342 debug("after ibmphp_access_ebda()\n");
1342 1343
1343 if ((rc = ibmphp_rsrc_init())) 1344 rc = ibmphp_rsrc_init();
1345 if (rc)
1344 goto error; 1346 goto error;
1345 debug("AFTER Resource & EBDA INITIALIZATIONS\n"); 1347 debug("AFTER Resource & EBDA INITIALIZATIONS\n");
1346 1348
1347 max_slots = get_max_slots(); 1349 max_slots = get_max_slots();
1348 1350
1349 if ((rc = ibmphp_register_pci())) 1351 rc = ibmphp_register_pci();
1352 if (rc)
1350 goto error; 1353 goto error;
1351 1354
1352 if (init_ops()) { 1355 if (init_ops()) {
@@ -1355,9 +1358,9 @@ static int __init ibmphp_init(void)
1355 } 1358 }
1356 1359
1357 ibmphp_print_test(); 1360 ibmphp_print_test();
1358 if ((rc = ibmphp_hpc_start_poll_thread())) { 1361 rc = ibmphp_hpc_start_poll_thread();
1362 if (rc)
1359 goto error; 1363 goto error;
1360 }
1361 1364
1362exit: 1365exit:
1363 return rc; 1366 return rc;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 0f65ac555434..d9b197d5c6b4 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -215,9 +215,8 @@ static void __init print_ebda_hpc (void)
215 debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap); 215 debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap);
216 } 216 }
217 217
218 for (index = 0; index < hpc_ptr->bus_count; index++) { 218 for (index = 0; index < hpc_ptr->bus_count; index++)
219 debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num); 219 debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num);
220 }
221 220
222 debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type); 221 debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type);
223 switch (hpc_ptr->ctlr_type) { 222 switch (hpc_ptr->ctlr_type) {
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a936022956e6..220876715a08 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -997,9 +997,8 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
997 rc = ibmphp_do_disable_slot (pslot); 997 rc = ibmphp_do_disable_slot (pslot);
998 } 998 }
999 999
1000 if (update || disable) { 1000 if (update || disable)
1001 ibmphp_update_slot_info (pslot); 1001 ibmphp_update_slot_info (pslot);
1002 }
1003 1002
1004 debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update); 1003 debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update);
1005 1004
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 2fd296706ce7..814cea22a9fa 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -145,7 +145,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
145 case PCI_HEADER_TYPE_NORMAL: 145 case PCI_HEADER_TYPE_NORMAL:
146 debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class); 146 debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class);
147 assign_alt_irq (cur_func, class_code); 147 assign_alt_irq (cur_func, class_code);
148 if ((rc = configure_device (cur_func)) < 0) { 148 rc = configure_device(cur_func);
149 if (rc < 0) {
149 /* We need to do this in case some other BARs were properly inserted */ 150 /* We need to do this in case some other BARs were properly inserted */
150 err ("was not able to configure devfunc %x on bus %x.\n", 151 err ("was not able to configure devfunc %x on bus %x.\n",
151 cur_func->device, cur_func->busno); 152 cur_func->device, cur_func->busno);
@@ -157,7 +158,8 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
157 break; 158 break;
158 case PCI_HEADER_TYPE_MULTIDEVICE: 159 case PCI_HEADER_TYPE_MULTIDEVICE:
159 assign_alt_irq (cur_func, class_code); 160 assign_alt_irq (cur_func, class_code);
160 if ((rc = configure_device (cur_func)) < 0) { 161 rc = configure_device(cur_func);
162 if (rc < 0) {
161 /* We need to do this in case some other BARs were properly inserted */ 163 /* We need to do this in case some other BARs were properly inserted */
162 err ("was not able to configure devfunc %x on bus %x...bailing out\n", 164 err ("was not able to configure devfunc %x on bus %x...bailing out\n",
163 cur_func->device, cur_func->busno); 165 cur_func->device, cur_func->busno);
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index f34745abd5b6..219ba8090a37 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -224,7 +224,8 @@ int __init ibmphp_rsrc_init (void)
224 if ((curr->rsrc_type & RESTYPE) == MMASK) { 224 if ((curr->rsrc_type & RESTYPE) == MMASK) {
225 /* no bus structure exists in place yet */ 225 /* no bus structure exists in place yet */
226 if (list_empty (&gbuses)) { 226 if (list_empty (&gbuses)) {
227 if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1))) 227 rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
228 if (rc)
228 return rc; 229 return rc;
229 list_add_tail (&newbus->bus_list, &gbuses); 230 list_add_tail (&newbus->bus_list, &gbuses);
230 debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 231 debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -237,7 +238,8 @@ int __init ibmphp_rsrc_init (void)
237 return rc; 238 return rc;
238 } else { 239 } else {
239 /* went through all the buses and didn't find ours, need to create a new bus node */ 240 /* went through all the buses and didn't find ours, need to create a new bus node */
240 if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1))) 241 rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
242 if (rc)
241 return rc; 243 return rc;
242 244
243 list_add_tail (&newbus->bus_list, &gbuses); 245 list_add_tail (&newbus->bus_list, &gbuses);
@@ -248,7 +250,8 @@ int __init ibmphp_rsrc_init (void)
248 /* prefetchable memory */ 250 /* prefetchable memory */
249 if (list_empty (&gbuses)) { 251 if (list_empty (&gbuses)) {
250 /* no bus structure exists in place yet */ 252 /* no bus structure exists in place yet */
251 if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1))) 253 rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
254 if (rc)
252 return rc; 255 return rc;
253 list_add_tail (&newbus->bus_list, &gbuses); 256 list_add_tail (&newbus->bus_list, &gbuses);
254 debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 257 debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -261,7 +264,8 @@ int __init ibmphp_rsrc_init (void)
261 return rc; 264 return rc;
262 } else { 265 } else {
263 /* went through all the buses and didn't find ours, need to create a new bus node */ 266 /* went through all the buses and didn't find ours, need to create a new bus node */
264 if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1))) 267 rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
268 if (rc)
265 return rc; 269 return rc;
266 list_add_tail (&newbus->bus_list, &gbuses); 270 list_add_tail (&newbus->bus_list, &gbuses);
267 debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 271 debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -271,7 +275,8 @@ int __init ibmphp_rsrc_init (void)
271 /* IO */ 275 /* IO */
272 if (list_empty (&gbuses)) { 276 if (list_empty (&gbuses)) {
273 /* no bus structure exists in place yet */ 277 /* no bus structure exists in place yet */
274 if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1))) 278 rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
279 if (rc)
275 return rc; 280 return rc;
276 list_add_tail (&newbus->bus_list, &gbuses); 281 list_add_tail (&newbus->bus_list, &gbuses);
277 debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 282 debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -283,7 +288,8 @@ int __init ibmphp_rsrc_init (void)
283 return rc; 288 return rc;
284 } else { 289 } else {
285 /* went through all the buses and didn't find ours, need to create a new bus node */ 290 /* went through all the buses and didn't find ours, need to create a new bus node */
286 if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1))) 291 rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
292 if (rc)
287 return rc; 293 return rc;
288 list_add_tail (&newbus->bus_list, &gbuses); 294 list_add_tail (&newbus->bus_list, &gbuses);
289 debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); 295 debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -1038,7 +1044,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1038 /* found our range */ 1044 /* found our range */
1039 if (!res_prev) { 1045 if (!res_prev) {
1040 /* first time in the loop */ 1046 /* first time in the loop */
1041 if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { 1047 len_tmp = res_cur->start - 1 - range->start;
1048
1049 if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
1042 debug ("len_tmp = %x\n", len_tmp); 1050 debug ("len_tmp = %x\n", len_tmp);
1043 1051
1044 if ((len_tmp < len_cur) || (len_cur == 0)) { 1052 if ((len_tmp < len_cur) || (len_cur == 0)) {
@@ -1078,7 +1086,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1078 } 1086 }
1079 if (!res_cur->next) { 1087 if (!res_cur->next) {
1080 /* last device on the range */ 1088 /* last device on the range */
1081 if ((range->end != res_cur->end) && ((len_tmp = range->end - (res_cur->end + 1)) >= res->len)) { 1089 len_tmp = range->end - (res_cur->end + 1);
1090
1091 if ((range->end != res_cur->end) && (len_tmp >= res->len)) {
1082 debug ("len_tmp = %x\n", len_tmp); 1092 debug ("len_tmp = %x\n", len_tmp);
1083 if ((len_tmp < len_cur) || (len_cur == 0)) { 1093 if ((len_tmp < len_cur) || (len_cur == 0)) {
1084 1094
@@ -1117,8 +1127,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1117 if (res_prev) { 1127 if (res_prev) {
1118 if (res_prev->rangeno != res_cur->rangeno) { 1128 if (res_prev->rangeno != res_cur->rangeno) {
1119 /* 1st device on this range */ 1129 /* 1st device on this range */
1120 if ((res_cur->start != range->start) && 1130 len_tmp = res_cur->start - 1 - range->start;
1121 ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { 1131
1132 if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
1122 if ((len_tmp < len_cur) || (len_cur == 0)) { 1133 if ((len_tmp < len_cur) || (len_cur == 0)) {
1123 if ((range->start % tmp_divide) == 0) { 1134 if ((range->start % tmp_divide) == 0) {
1124 /* just perfect, starting address is divisible by length */ 1135 /* just perfect, starting address is divisible by length */
@@ -1153,7 +1164,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1153 } 1164 }
1154 } else { 1165 } else {
1155 /* in the same range */ 1166 /* in the same range */
1156 if ((len_tmp = res_cur->start - 1 - res_prev->end - 1) >= res->len) { 1167 len_tmp = res_cur->start - 1 - res_prev->end - 1;
1168
1169 if (len_tmp >= res->len) {
1157 if ((len_tmp < len_cur) || (len_cur == 0)) { 1170 if ((len_tmp < len_cur) || (len_cur == 0)) {
1158 if (((res_prev->end + 1) % tmp_divide) == 0) { 1171 if (((res_prev->end + 1) % tmp_divide) == 0) {
1159 /* just perfect, starting address's divisible by length */ 1172 /* just perfect, starting address's divisible by length */
@@ -1212,7 +1225,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1212 break; 1225 break;
1213 } 1226 }
1214 while (range) { 1227 while (range) {
1215 if ((len_tmp = range->end - range->start) >= res->len) { 1228 len_tmp = range->end - range->start;
1229
1230 if (len_tmp >= res->len) {
1216 if ((len_tmp < len_cur) || (len_cur == 0)) { 1231 if ((len_tmp < len_cur) || (len_cur == 0)) {
1217 if ((range->start % tmp_divide) == 0) { 1232 if ((range->start % tmp_divide) == 0) {
1218 /* just perfect, starting address's divisible by length */ 1233 /* just perfect, starting address's divisible by length */
@@ -1276,7 +1291,9 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1276 break; 1291 break;
1277 } 1292 }
1278 while (range) { 1293 while (range) {
1279 if ((len_tmp = range->end - range->start) >= res->len) { 1294 len_tmp = range->end - range->start;
1295
1296 if (len_tmp >= res->len) {
1280 if ((len_tmp < len_cur) || (len_cur == 0)) { 1297 if ((len_tmp < len_cur) || (len_cur == 0)) {
1281 if ((range->start % tmp_divide) == 0) { 1298 if ((range->start % tmp_divide) == 0) {
1282 /* just perfect, starting address's divisible by length */ 1299 /* just perfect, starting address's divisible by length */
@@ -1335,7 +1352,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge)
1335 return -EINVAL; 1352 return -EINVAL;
1336 } 1353 }
1337 } 1354 }
1338 } /* end if(!res_cur) */ 1355 } /* end if (!res_cur) */
1339 return -EINVAL; 1356 return -EINVAL;
1340} 1357}
1341 1358
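Most of the ibmphp_res.c churn above is one mechanical transform, repeated: an assignment buried inside an if-condition is hoisted onto its own line, which is what checkpatch.pl flags. The pattern, as a sketch (use_range is a hypothetical helper):

	/* Before: assignment hidden inside the condition */
	if ((len_tmp = range->end - range->start) >= res->len)
		use_range(range);

	/* After: compute first, then test; behavior is unchanged */
	len_tmp = range->end - range->start;
	if (len_tmp >= res->len)
		use_range(range);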
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 9e5a9fbb93d7..b11521953485 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -92,7 +92,7 @@ struct controller {
92 struct slot *slot; 92 struct slot *slot;
93 wait_queue_head_t queue; /* sleep & wake process */ 93 wait_queue_head_t queue; /* sleep & wake process */
94 u32 slot_cap; 94 u32 slot_cap;
95 u32 slot_ctrl; 95 u16 slot_ctrl;
96 struct timer_list poll_timer; 96 struct timer_list poll_timer;
97 unsigned long cmd_started; /* jiffies */ 97 unsigned long cmd_started; /* jiffies */
98 unsigned int cmd_busy:1; 98 unsigned int cmd_busy:1;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 07aa722bb12c..3a5e7e28b874 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -262,6 +262,13 @@ static int pciehp_probe(struct pcie_device *dev)
262 goto err_out_none; 262 goto err_out_none;
263 } 263 }
264 264
265 if (!dev->port->subordinate) {
266 /* Can happen if we run out of bus numbers during probe */
267 dev_err(&dev->device,
268 "Hotplug bridge without secondary bus, ignoring\n");
269 goto err_out_none;
270 }
271
265 ctrl = pcie_init(dev); 272 ctrl = pcie_init(dev);
266 if (!ctrl) { 273 if (!ctrl) {
267 dev_err(&dev->device, "Controller initialization failed\n"); 274 dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9da84b8b27d8..f0dc6cb9c5be 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -171,9 +171,9 @@ static void pcie_wait_cmd(struct controller *ctrl)
171 * interrupts. 171 * interrupts.
172 */ 172 */
173 if (!rc) 173 if (!rc)
174 ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n", 174 ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
175 ctrl->slot_ctrl, 175 ctrl->slot_ctrl,
176 jiffies_to_msecs(now - ctrl->cmd_started)); 176 jiffies_to_msecs(jiffies - ctrl->cmd_started));
177} 177}
178 178
179/** 179/**
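Two fixes land in this hunk: the elapsed time is now computed from the current jiffies rather than a now value that appears to have been captured before the wait, and the format shrinks to %#06x because slot_ctrl is narrowed from u32 to u16 in the pciehp.h hunk above. The corrected report, for reference:

	/* %#06x prints a 16-bit value as 0xNNNN; sampling jiffies at
	 * print time keeps the reported command age accurate. */
	ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
		  ctrl->slot_ctrl,
		  jiffies_to_msecs(jiffies - ctrl->cmd_started));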
@@ -422,9 +422,9 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
422 default: 422 default:
423 return; 423 return;
424 } 424 }
425 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
425 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 426 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
426 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 427 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
427 pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
428} 428}
429 429
430void pciehp_green_led_on(struct slot *slot) 430void pciehp_green_led_on(struct slot *slot)
@@ -602,6 +602,8 @@ void pcie_enable_notification(struct controller *ctrl)
602 PCI_EXP_SLTCTL_DLLSCE); 602 PCI_EXP_SLTCTL_DLLSCE);
603 603
604 pcie_write_cmd(ctrl, cmd, mask); 604 pcie_write_cmd(ctrl, cmd, mask);
605 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
606 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
605} 607}
606 608
607static void pcie_disable_notification(struct controller *ctrl) 609static void pcie_disable_notification(struct controller *ctrl)
@@ -613,6 +615,8 @@ static void pcie_disable_notification(struct controller *ctrl)
613 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | 615 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
614 PCI_EXP_SLTCTL_DLLSCE); 616 PCI_EXP_SLTCTL_DLLSCE);
615 pcie_write_cmd(ctrl, 0, mask); 617 pcie_write_cmd(ctrl, 0, mask);
618 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
619 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
616} 620}
617 621
618/* 622/*
@@ -640,6 +644,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
640 stat_mask |= PCI_EXP_SLTSTA_DLLSC; 644 stat_mask |= PCI_EXP_SLTSTA_DLLSC;
641 645
642 pcie_write_cmd(ctrl, 0, ctrl_mask); 646 pcie_write_cmd(ctrl, 0, ctrl_mask);
647 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
648 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
643 if (pciehp_poll_mode) 649 if (pciehp_poll_mode)
644 del_timer_sync(&ctrl->poll_timer); 650 del_timer_sync(&ctrl->poll_timer);
645 651
@@ -647,6 +653,8 @@ int pciehp_reset_slot(struct slot *slot, int probe)
647 653
648 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); 654 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
649 pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask); 655 pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
656 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
657 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
650 if (pciehp_poll_mode) 658 if (pciehp_poll_mode)
651 int_poll_timeout(ctrl->poll_timer.data); 659 int_poll_timeout(ctrl->poll_timer.data);
652 660
@@ -785,9 +793,6 @@ struct controller *pcie_init(struct pcie_device *dev)
785 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 793 PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
786 PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); 794 PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
787 795
788 /* Disable software notification */
789 pcie_disable_notification(ctrl);
790
791 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n", 796 ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
792 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, 797 (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
793 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), 798 FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5f871f4c4af1..9e69403be632 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -65,14 +65,7 @@ int pciehp_configure_device(struct slot *p_slot)
65 pci_hp_add_bridge(dev); 65 pci_hp_add_bridge(dev);
66 66
67 pci_assign_unassigned_bridge_resources(bridge); 67 pci_assign_unassigned_bridge_resources(bridge);
68 68 pcie_bus_configure_settings(parent);
69 list_for_each_entry(dev, &parent->devices, bus_list) {
70 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
71 continue;
72
73 pci_configure_slot(dev);
74 }
75
76 pci_bus_add_devices(parent); 69 pci_bus_add_devices(parent);
77 70
78 out: 71 out:
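With the per-device pci_configure_slot() walk gone, the hotplug path converges on the core helpers: assign the bridge resources, let pcie_bus_configure_settings() apply the bus-wide Max Payload Size tuning, then add the devices. The sequence now reads:

	pci_assign_unassigned_bridge_resources(bridge);
	pcie_bus_configure_settings(parent);	/* bus-wide MPS tuning */
	pci_bus_add_devices(parent);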
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
deleted file mode 100644
index e246a10a0d2c..000000000000
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ /dev/null
@@ -1,180 +0,0 @@
1/*
2 * Copyright (C) 1995,2001 Compaq Computer Corporation
3 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
4 * Copyright (C) 2001 IBM Corp.
5 * Copyright (C) 2003-2004 Intel Corporation
6 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
7 *
8 * All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/pci.h>
27#include <linux/export.h>
28#include <linux/pci_hotplug.h>
29
30static struct hpp_type0 pci_default_type0 = {
31 .revision = 1,
32 .cache_line_size = 8,
33 .latency_timer = 0x40,
34 .enable_serr = 0,
35 .enable_perr = 0,
36};
37
38static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
39{
40 u16 pci_cmd, pci_bctl;
41
42 if (!hpp) {
43 /*
44 * Perhaps we *should* use default settings for PCIe, but
45 * pciehp didn't, so we won't either.
46 */
47 if (pci_is_pcie(dev))
48 return;
49 dev_info(&dev->dev, "using default PCI settings\n");
50 hpp = &pci_default_type0;
51 }
52
53 if (hpp->revision > 1) {
54 dev_warn(&dev->dev,
55 "PCI settings rev %d not supported; using defaults\n",
56 hpp->revision);
57 hpp = &pci_default_type0;
58 }
59
60 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
61 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
62 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
63 if (hpp->enable_serr)
64 pci_cmd |= PCI_COMMAND_SERR;
65 else
66 pci_cmd &= ~PCI_COMMAND_SERR;
67 if (hpp->enable_perr)
68 pci_cmd |= PCI_COMMAND_PARITY;
69 else
70 pci_cmd &= ~PCI_COMMAND_PARITY;
71 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
72
73 /* Program bridge control value */
74 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
75 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
76 hpp->latency_timer);
77 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
78 if (hpp->enable_serr)
79 pci_bctl |= PCI_BRIDGE_CTL_SERR;
80 else
81 pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
82 if (hpp->enable_perr)
83 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
84 else
85 pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
86 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
87 }
88}
89
90static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
91{
92 if (hpp)
93 dev_warn(&dev->dev, "PCI-X settings not supported\n");
94}
95
96static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
97{
98 int pos;
99 u32 reg32;
100
101 if (!hpp)
102 return;
103
104 if (hpp->revision > 1) {
105 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
106 hpp->revision);
107 return;
108 }
109
110 /* Initialize Device Control Register */
111 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
112 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
113
114 /* Initialize Link Control Register */
115 if (dev->subordinate)
116 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
117 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
118
119 /* Find Advanced Error Reporting Enhanced Capability */
120 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
121 if (!pos)
122 return;
123
124 /* Initialize Uncorrectable Error Mask Register */
125 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
126 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
127 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
128
129 /* Initialize Uncorrectable Error Severity Register */
130 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
131 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
132 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
133
134 /* Initialize Correctable Error Mask Register */
135 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
136 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
137 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
138
139 /* Initialize Advanced Error Capabilities and Control Register */
140 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
141 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
142 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
143
144 /*
145 * FIXME: The following two registers are not supported yet.
146 *
147 * o Secondary Uncorrectable Error Severity Register
148 * o Secondary Uncorrectable Error Mask Register
149 */
150}
151
152void pci_configure_slot(struct pci_dev *dev)
153{
154 struct pci_dev *cdev;
155 struct hotplug_params hpp;
156 int ret;
157
158 if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
159 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
160 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
161 return;
162
163 pcie_bus_configure_settings(dev->bus);
164
165 memset(&hpp, 0, sizeof(hpp));
166 ret = pci_get_hp_params(dev, &hpp);
167 if (ret)
168 dev_warn(&dev->dev, "no hotplug settings from platform\n");
169
170 program_hpp_type2(dev, hpp.t2);
171 program_hpp_type1(dev, hpp.t1);
172 program_hpp_type0(dev, hpp.t0);
173
174 if (dev->subordinate) {
175 list_for_each_entry(cdev, &dev->subordinate->devices,
176 bus_list)
177 pci_configure_slot(cdev);
178 }
179}
180EXPORT_SYMBOL_GPL(pci_configure_slot);
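Before this series, pci_configure_slot() had to be invoked by each hotplug driver after scanning a slot; the file is deleted here and its body reappears in drivers/pci/probe.c further down in this diff. A hedged sketch of the old calling pattern (hypothetical driver function, mirroring the shpchp code changed below):

    #include <linux/pci.h>
    #include <linux/pci_hotplug.h>

    /* Hypothetical pre-series enable path: pci_configure_slot() had to
     * be called explicitly for every device found in the slot.
     */
    static void old_style_slot_enable(struct pci_bus *parent, int device)
    {
    	struct pci_dev *dev;

    	pci_scan_slot(parent, PCI_DEVFN(device, 0));
    	list_for_each_entry(dev, &parent->devices, bus_list) {
    		if (PCI_SLOT(dev->devfn) != device)
    			continue;
    		pci_configure_slot(dev);	/* applies _HPX/_HPP */
    	}
    	pci_bus_add_devices(parent);
    }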
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index a81fb67ea9a1..10c7927599b3 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -195,7 +195,8 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
195 int rc = 0; 195 int rc = 0;
196 196
197 ctrl_dbg(ctrl, "Change speed to %d\n", speed); 197 ctrl_dbg(ctrl, "Change speed to %d\n", speed);
198 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { 198 rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
199 if (rc) {
199 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", 200 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
200 __func__); 201 __func__);
201 return WRONG_BUS_FREQUENCY; 202 return WRONG_BUS_FREQUENCY;
@@ -261,14 +262,16 @@ static int board_added(struct slot *p_slot)
261 } 262 }
262 263
263 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { 264 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
264 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { 265 rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
266 if (rc) {
265 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", 267 ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
266 __func__); 268 __func__);
267 return WRONG_BUS_FREQUENCY; 269 return WRONG_BUS_FREQUENCY;
268 } 270 }
269 271
270 /* turn on board, blink green LED, turn off Amber LED */ 272 /* turn on board, blink green LED, turn off Amber LED */
271 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 273 rc = p_slot->hpc_ops->slot_enable(p_slot);
274 if (rc) {
272 ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); 275 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
273 return rc; 276 return rc;
274 } 277 }
@@ -296,7 +299,8 @@ static int board_added(struct slot *p_slot)
296 return rc; 299 return rc;
297 300
298 /* turn on board, blink green LED, turn off Amber LED */ 301 /* turn on board, blink green LED, turn off Amber LED */
299 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 302 rc = p_slot->hpc_ops->slot_enable(p_slot);
303 if (rc) {
300 ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); 304 ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
301 return rc; 305 return rc;
302 } 306 }
@@ -595,7 +599,7 @@ static int shpchp_enable_slot (struct slot *p_slot)
595 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); 599 ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
596 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 600 p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
597 601
598 if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || 602 if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
599 (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)) 603 (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
600 && p_slot->ctrl->num_slots == 1) { 604 && p_slot->ctrl->num_slots == 1) {
601 /* handle amd pogo errata; this must be done before enable */ 605 /* handle amd pogo errata; this must be done before enable */
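All three hunks in this file apply the same mechanical cleanup: checkpatch warns about assignments inside if-conditions, so the assignment is hoisted into its own statement. In isolation the pattern is (hypothetical helper, with hpc_ops and ctrl_err() as used above):

    /* Before:  if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { ... }
     * After:   assign first, then test.
     */
    static int enable_and_check(struct slot *p_slot)
    {
    	int rc;

    	rc = p_slot->hpc_ops->slot_enable(p_slot);
    	if (rc)
    		ctrl_err(p_slot->ctrl, "Issue of Slot Enable command failed\n");
    	return rc;
    }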
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 29e22352822c..7d223e9080ef 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -466,7 +466,8 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
466 u8 m66_cap = !!(slot_reg & MHZ66_CAP); 466 u8 m66_cap = !!(slot_reg & MHZ66_CAP);
467 u8 pi, pcix_cap; 467 u8 pi, pcix_cap;
468 468
469 if ((retval = hpc_get_prog_int(slot, &pi))) 469 retval = hpc_get_prog_int(slot, &pi);
470 if (retval)
470 return retval; 471 return retval;
471 472
472 switch (pi) { 473 switch (pi) {
@@ -798,7 +799,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
798 799
799 ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc); 800 ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
800 801
801 if(!shpchp_poll_mode) { 802 if (!shpchp_poll_mode) {
802 /* 803 /*
803 * Mask Global Interrupt Mask - see implementation 804 * Mask Global Interrupt Mask - see implementation
804 * note on p. 139 of SHPC spec rev 1.0 805 * note on p. 139 of SHPC spec rev 1.0
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 469454e0cc48..f8cd3a27e351 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -69,13 +69,7 @@ int shpchp_configure_device(struct slot *p_slot)
69 } 69 }
70 70
71 pci_assign_unassigned_bridge_resources(bridge); 71 pci_assign_unassigned_bridge_resources(bridge);
72 72 pcie_bus_configure_settings(parent);
73 list_for_each_entry(dev, &parent->devices, bus_list) {
74 if (PCI_SLOT(dev->devfn) != p_slot->device)
75 continue;
76 pci_configure_slot(dev);
77 }
78
79 pci_bus_add_devices(parent); 73 pci_bus_add_devices(parent);
80 74
81 out: 75 out:
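With this change shpchp stops configuring individual devices altogether: pci_scan_slot() leads into pci_device_add(), which now applies _HPX/_HPP itself (see the probe.c hunks below), leaving only the bus-wide MPS/MRRS pass in the driver. A hedged condensation of the resulting shpchp_configure_device() flow (simplified, error handling elided):

    static int new_style_slot_enable(struct slot *p_slot, struct pci_bus *parent)
    {
    	int num;

    	num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
    	if (num == 0)
    		return -ENODEV;			/* no device in the slot */

    	pci_assign_unassigned_bridge_resources(parent->self);
    	pcie_bus_configure_settings(parent);	/* bus-wide MPS/MRRS */
    	pci_bus_add_devices(parent);		/* _HPX/_HPP already applied */
    	return 0;
    }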
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index cb6f24740ee3..4d109c07294a 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -633,7 +633,7 @@ int pci_vfs_assigned(struct pci_dev *dev)
633 * our dev as the physical function and the assigned bit is set 633 * our dev as the physical function and the assigned bit is set
634 */ 634 */
635 if (vfdev->is_virtfn && (vfdev->physfn == dev) && 635 if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
636 (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) 636 pci_is_dev_assigned(vfdev))
637 vfs_assigned++; 637 vfs_assigned++;
638 638
639 vfdev = pci_get_device(dev->vendor, dev_id, vfdev); 639 vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
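pci_is_dev_assigned() is a thin inline wrapper over the same flag test; from memory it amounts to the sketch below, so treat this as an approximation rather than the exact header definition:

    static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
    {
    	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) ==
    		PCI_DEV_FLAGS_ASSIGNED;
    }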
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 37263b0ebfe3..6ebf8edc5f3c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -10,6 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/pci_hotplug.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/pci-aspm.h> 15#include <linux/pci-aspm.h>
15#include <linux/pci-acpi.h> 16#include <linux/pci-acpi.h>
@@ -17,6 +18,267 @@
17#include <linux/pm_qos.h> 18#include <linux/pm_qos.h>
18#include "pci.h" 19#include "pci.h"
19 20
21phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
22{
23 acpi_status status = AE_NOT_EXIST;
24 unsigned long long mcfg_addr;
25
26 if (handle)
27 status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
28 NULL, &mcfg_addr);
29 if (ACPI_FAILURE(status))
30 return 0;
31
32 return (phys_addr_t)mcfg_addr;
33}
34
35static acpi_status decode_type0_hpx_record(union acpi_object *record,
36 struct hotplug_params *hpx)
37{
38 int i;
39 union acpi_object *fields = record->package.elements;
40 u32 revision = fields[1].integer.value;
41
42 switch (revision) {
43 case 1:
44 if (record->package.count != 6)
45 return AE_ERROR;
46 for (i = 2; i < 6; i++)
47 if (fields[i].type != ACPI_TYPE_INTEGER)
48 return AE_ERROR;
49 hpx->t0 = &hpx->type0_data;
50 hpx->t0->revision = revision;
51 hpx->t0->cache_line_size = fields[2].integer.value;
52 hpx->t0->latency_timer = fields[3].integer.value;
53 hpx->t0->enable_serr = fields[4].integer.value;
54 hpx->t0->enable_perr = fields[5].integer.value;
55 break;
56 default:
57 printk(KERN_WARNING
58 "%s: Type 0 Revision %d record not supported\n",
59 __func__, revision);
60 return AE_ERROR;
61 }
62 return AE_OK;
63}
64
65static acpi_status decode_type1_hpx_record(union acpi_object *record,
66 struct hotplug_params *hpx)
67{
68 int i;
69 union acpi_object *fields = record->package.elements;
70 u32 revision = fields[1].integer.value;
71
72 switch (revision) {
73 case 1:
74 if (record->package.count != 5)
75 return AE_ERROR;
76 for (i = 2; i < 5; i++)
77 if (fields[i].type != ACPI_TYPE_INTEGER)
78 return AE_ERROR;
79 hpx->t1 = &hpx->type1_data;
80 hpx->t1->revision = revision;
81 hpx->t1->max_mem_read = fields[2].integer.value;
82 hpx->t1->avg_max_split = fields[3].integer.value;
83 hpx->t1->tot_max_split = fields[4].integer.value;
84 break;
85 default:
86 printk(KERN_WARNING
87 "%s: Type 1 Revision %d record not supported\n",
88 __func__, revision);
89 return AE_ERROR;
90 }
91 return AE_OK;
92}
93
94static acpi_status decode_type2_hpx_record(union acpi_object *record,
95 struct hotplug_params *hpx)
96{
97 int i;
98 union acpi_object *fields = record->package.elements;
99 u32 revision = fields[1].integer.value;
100
101 switch (revision) {
102 case 1:
103 if (record->package.count != 18)
104 return AE_ERROR;
105 for (i = 2; i < 18; i++)
106 if (fields[i].type != ACPI_TYPE_INTEGER)
107 return AE_ERROR;
108 hpx->t2 = &hpx->type2_data;
109 hpx->t2->revision = revision;
110 hpx->t2->unc_err_mask_and = fields[2].integer.value;
111 hpx->t2->unc_err_mask_or = fields[3].integer.value;
112 hpx->t2->unc_err_sever_and = fields[4].integer.value;
113 hpx->t2->unc_err_sever_or = fields[5].integer.value;
114 hpx->t2->cor_err_mask_and = fields[6].integer.value;
115 hpx->t2->cor_err_mask_or = fields[7].integer.value;
116 hpx->t2->adv_err_cap_and = fields[8].integer.value;
117 hpx->t2->adv_err_cap_or = fields[9].integer.value;
118 hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
119 hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
120 hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
121 hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
122 hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
123 hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
124 hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
125 hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
126 break;
127 default:
128 printk(KERN_WARNING
129 "%s: Type 2 Revision %d record not supported\n",
130 __func__, revision);
131 return AE_ERROR;
132 }
133 return AE_OK;
134}
135
136static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
137{
138 acpi_status status;
139 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
140 union acpi_object *package, *record, *fields;
141 u32 type;
142 int i;
143
144 /* Clear the return buffer with zeros */
145 memset(hpx, 0, sizeof(struct hotplug_params));
146
147 status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
148 if (ACPI_FAILURE(status))
149 return status;
150
151 package = (union acpi_object *)buffer.pointer;
152 if (package->type != ACPI_TYPE_PACKAGE) {
153 status = AE_ERROR;
154 goto exit;
155 }
156
157 for (i = 0; i < package->package.count; i++) {
158 record = &package->package.elements[i];
159 if (record->type != ACPI_TYPE_PACKAGE) {
160 status = AE_ERROR;
161 goto exit;
162 }
163
164 fields = record->package.elements;
165 if (fields[0].type != ACPI_TYPE_INTEGER ||
166 fields[1].type != ACPI_TYPE_INTEGER) {
167 status = AE_ERROR;
168 goto exit;
169 }
170
171 type = fields[0].integer.value;
172 switch (type) {
173 case 0:
174 status = decode_type0_hpx_record(record, hpx);
175 if (ACPI_FAILURE(status))
176 goto exit;
177 break;
178 case 1:
179 status = decode_type1_hpx_record(record, hpx);
180 if (ACPI_FAILURE(status))
181 goto exit;
182 break;
183 case 2:
184 status = decode_type2_hpx_record(record, hpx);
185 if (ACPI_FAILURE(status))
186 goto exit;
187 break;
188 default:
189 printk(KERN_ERR "%s: Type %d record not supported\n",
190 __func__, type);
191 status = AE_ERROR;
192 goto exit;
193 }
194 }
195 exit:
196 kfree(buffer.pointer);
197 return status;
198}
199
200static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
201{
202 acpi_status status;
203 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
204 union acpi_object *package, *fields;
205 int i;
206
207 memset(hpp, 0, sizeof(struct hotplug_params));
208
209 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
210 if (ACPI_FAILURE(status))
211 return status;
212
213 package = (union acpi_object *) buffer.pointer;
214 if (package->type != ACPI_TYPE_PACKAGE ||
215 package->package.count != 4) {
216 status = AE_ERROR;
217 goto exit;
218 }
219
220 fields = package->package.elements;
221 for (i = 0; i < 4; i++) {
222 if (fields[i].type != ACPI_TYPE_INTEGER) {
223 status = AE_ERROR;
224 goto exit;
225 }
226 }
227
228 hpp->t0 = &hpp->type0_data;
229 hpp->t0->revision = 1;
230 hpp->t0->cache_line_size = fields[0].integer.value;
231 hpp->t0->latency_timer = fields[1].integer.value;
232 hpp->t0->enable_serr = fields[2].integer.value;
233 hpp->t0->enable_perr = fields[3].integer.value;
234
235exit:
236 kfree(buffer.pointer);
237 return status;
238}
239
240/* pci_get_hp_params
241 *
242 * @dev - the pci_dev for which we want parameters
243 * @hpp - allocated by the caller
244 */
245int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
246{
247 acpi_status status;
248 acpi_handle handle, phandle;
249 struct pci_bus *pbus;
250
251 handle = NULL;
252 for (pbus = dev->bus; pbus; pbus = pbus->parent) {
253 handle = acpi_pci_get_bridge_handle(pbus);
254 if (handle)
255 break;
256 }
257
258 /*
259 * _HPP settings apply to all child buses, until another _HPP is
260 * encountered. If we don't find an _HPP for the input pci dev,
261 * look for it in the parent device scope since that would apply to
262 * this pci dev.
263 */
264 while (handle) {
265 status = acpi_run_hpx(handle, hpp);
266 if (ACPI_SUCCESS(status))
267 return 0;
268 status = acpi_run_hpp(handle, hpp);
269 if (ACPI_SUCCESS(status))
270 return 0;
271 if (acpi_is_root_bridge(handle))
272 break;
273 status = acpi_get_parent(handle, &phandle);
274 if (ACPI_FAILURE(status))
275 break;
276 handle = phandle;
277 }
278 return -ENODEV;
279}
280EXPORT_SYMBOL_GPL(pci_get_hp_params);
281
20/** 282/**
21 * pci_acpi_wake_bus - Root bus wakeup notification fork function. 283 * pci_acpi_wake_bus - Root bus wakeup notification fork function.
22 * @work: Work item to handle. 284 * @work: Work item to handle.
@@ -84,20 +346,6 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
84 return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev); 346 return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
85} 347}
86 348
87phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
88{
89 acpi_status status = AE_NOT_EXIST;
90 unsigned long long mcfg_addr;
91
92 if (handle)
93 status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
94 NULL, &mcfg_addr);
95 if (ACPI_FAILURE(status))
96 return 0;
97
98 return (phys_addr_t)mcfg_addr;
99}
100
101/* 349/*
102 * _SxD returns the D-state with the highest power 350 * _SxD returns the D-state with the highest power
103 * (lowest D-state number) supported in the S-state "x". 351 * (lowest D-state number) supported in the S-state "x".
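A hedged sketch of the expected pci_get_hp_params() caller pattern; it matches pci_configure_device(), added to probe.c further down in this diff (hypothetical function name):

    static void apply_platform_hp_params(struct pci_dev *dev)
    {
    	struct hotplug_params hpp;	/* from <linux/pci_hotplug.h> */

    	memset(&hpp, 0, sizeof(hpp));
    	if (pci_get_hp_params(dev, &hpp))
    		return;		/* no _HPX/_HPP found up the ACPI tree */

    	/* hpp.t0/t1/t2 point into hpp itself when the firmware
    	 * supplied that record type, and are NULL otherwise.
    	 */
    	if (hpp.t2)
    		dev_info(&dev->dev, "_HPX type 2 record present\n");
    }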
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d04c5adafc16..2b3c89425bb5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -55,7 +55,6 @@ int pci_add_dynid(struct pci_driver *drv,
55 unsigned long driver_data) 55 unsigned long driver_data)
56{ 56{
57 struct pci_dynid *dynid; 57 struct pci_dynid *dynid;
58 int retval;
59 58
60 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 59 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
61 if (!dynid) 60 if (!dynid)
@@ -73,9 +72,7 @@ int pci_add_dynid(struct pci_driver *drv,
73 list_add_tail(&dynid->node, &drv->dynids.list); 72 list_add_tail(&dynid->node, &drv->dynids.list);
74 spin_unlock(&drv->dynids.lock); 73 spin_unlock(&drv->dynids.lock);
75 74
76 retval = driver_attach(&drv->driver); 75 return driver_attach(&drv->driver);
77
78 return retval;
79} 76}
80EXPORT_SYMBOL_GPL(pci_add_dynid); 77EXPORT_SYMBOL_GPL(pci_add_dynid);
81 78
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ff0a901ecf7..76ef7914c9aa 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -177,7 +177,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
177{ 177{
178 struct pci_dev *pci_dev = to_pci_dev(dev); 178 struct pci_dev *pci_dev = to_pci_dev(dev);
179 179
180 return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n", 180 return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
181 pci_dev->vendor, pci_dev->device, 181 pci_dev->vendor, pci_dev->device,
182 pci_dev->subsystem_vendor, pci_dev->subsystem_device, 182 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
183 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), 183 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
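Only the interface byte's format specifier changes here (%02x to %02X): for a hypothetical device whose programming interface is 0x8a, the modalias tail becomes ...i8A instead of ...i8a, matching the upper-case hex used by every other field. Values whose hex digits are all numeric render identically under either specifier.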
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6e994fc077f4..625a4ace10b4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1003,12 +1003,19 @@ int pci_save_state(struct pci_dev *dev)
1003 for (i = 0; i < 16; i++) 1003 for (i = 0; i < 16; i++)
1004 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); 1004 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1005 dev->state_saved = true; 1005 dev->state_saved = true;
1006 if ((i = pci_save_pcie_state(dev)) != 0) 1006
1007 i = pci_save_pcie_state(dev);
1008 if (i != 0)
1007 return i; 1009 return i;
1008 if ((i = pci_save_pcix_state(dev)) != 0) 1010
1011 i = pci_save_pcix_state(dev);
1012 if (i != 0)
1009 return i; 1013 return i;
1010 if ((i = pci_save_vc_state(dev)) != 0) 1014
1015 i = pci_save_vc_state(dev);
1016 if (i != 0)
1011 return i; 1017 return i;
1018
1012 return 0; 1019 return 0;
1013} 1020}
1014EXPORT_SYMBOL(pci_save_state); 1021EXPORT_SYMBOL(pci_save_state);
@@ -1907,10 +1914,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1907 if (target_state == PCI_POWER_ERROR) 1914 if (target_state == PCI_POWER_ERROR)
1908 return -EIO; 1915 return -EIO;
1909 1916
1910 /* D3cold during system suspend/hibernate is not supported */
1911 if (target_state > PCI_D3hot)
1912 target_state = PCI_D3hot;
1913
1914 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 1917 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1915 1918
1916 error = pci_set_power_state(dev, target_state); 1919 error = pci_set_power_state(dev, target_state);
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 35d06e177917..c6849d9e86ce 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -89,15 +89,17 @@ static const char *aer_correctable_error_string[] = {
89 NULL, 89 NULL,
90 "Replay Timer Timeout", /* Bit Position 12 */ 90 "Replay Timer Timeout", /* Bit Position 12 */
91 "Advisory Non-Fatal", /* Bit Position 13 */ 91 "Advisory Non-Fatal", /* Bit Position 13 */
92 "Corrected Internal Error", /* Bit Position 14 */
93 "Header Log Overflow", /* Bit Position 15 */
92}; 94};
93 95
94static const char *aer_uncorrectable_error_string[] = { 96static const char *aer_uncorrectable_error_string[] = {
95 NULL, 97 "Undefined", /* Bit Position 0 */
96 NULL, 98 NULL,
97 NULL, 99 NULL,
98 NULL, 100 NULL,
99 "Data Link Protocol", /* Bit Position 4 */ 101 "Data Link Protocol", /* Bit Position 4 */
100 NULL, 102 "Surprise Down Error", /* Bit Position 5 */
101 NULL, 103 NULL,
102 NULL, 104 NULL,
103 NULL, 105 NULL,
@@ -113,6 +115,11 @@ static const char *aer_uncorrectable_error_string[] = {
113 "Malformed TLP", /* Bit Position 18 */ 115 "Malformed TLP", /* Bit Position 18 */
114 "ECRC", /* Bit Position 19 */ 116 "ECRC", /* Bit Position 19 */
115 "Unsupported Request", /* Bit Position 20 */ 117 "Unsupported Request", /* Bit Position 20 */
118 "ACS Violation", /* Bit Position 21 */
119 "Uncorrectable Internal Error", /* Bit Position 22 */
120 "MC Blocked TLP", /* Bit Position 23 */
121 "AtomicOp Egress Blocked", /* Bit Position 24 */
122 "TLP Prefix Blocked Error", /* Bit Position 25 */
116}; 123};
117 124
118static const char *aer_agent_string[] = { 125static const char *aer_agent_string[] = {
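These tables are indexed by bit position within the AER status registers, with NULL marking reserved bits. A decoder walks the set bits; a hedged sketch (hypothetical helper, not the file's actual print path):

    static void decode_aer_bits(struct pci_dev *dev, u32 status,
    			    const char **strings, unsigned int n)
    {
    	unsigned int i;

    	for (i = 0; i < n; i++)
    		if ((status & (1u << i)) && strings[i])
    			dev_err(&dev->dev, "   [%2u] %s\n", i, strings[i]);
    }

    /* e.g. decode_aer_bits(dev, uncor_status,
     *			aer_uncorrectable_error_string,
     *			ARRAY_SIZE(aer_uncorrectable_error_string));
     */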
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 2ccc9b926ea7..be35da2e105e 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -93,77 +93,6 @@ static int pcie_port_resume_noirq(struct device *dev)
93 return 0; 93 return 0;
94} 94}
95 95
96#ifdef CONFIG_PM_RUNTIME
97struct d3cold_info {
98 bool no_d3cold;
99 unsigned int d3cold_delay;
100};
101
102static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
103{
104 struct d3cold_info *info = data;
105
106 info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
107 info->d3cold_delay);
108 if (pdev->no_d3cold)
109 info->no_d3cold = true;
110 return 0;
111}
112
113static int pcie_port_runtime_suspend(struct device *dev)
114{
115 struct pci_dev *pdev = to_pci_dev(dev);
116 struct d3cold_info d3cold_info = {
117 .no_d3cold = false,
118 .d3cold_delay = PCI_PM_D3_WAIT,
119 };
120
121 /*
 122 * If any subordinate device disables D3cold, we should not put
 123 * the port into D3cold. The D3cold delay of the port should be
124 * the max of that of all subordinate devices.
125 */
126 pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
127 pdev->no_d3cold = d3cold_info.no_d3cold;
128 pdev->d3cold_delay = d3cold_info.d3cold_delay;
129 return 0;
130}
131
132static int pcie_port_runtime_resume(struct device *dev)
133{
134 return 0;
135}
136
137static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
138{
139 bool *pme_poll = data;
140
141 if (pdev->pme_poll)
142 *pme_poll = true;
143 return 0;
144}
145
146static int pcie_port_runtime_idle(struct device *dev)
147{
148 struct pci_dev *pdev = to_pci_dev(dev);
149 bool pme_poll = false;
150
151 /*
152 * If any subordinate device needs pme poll, we should keep
 153 * the port in D0, because we need the port in D0 to poll it.
154 */
155 pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
156 /* Delay for a short while to prevent too frequent suspend/resume */
157 if (!pme_poll)
158 pm_schedule_suspend(dev, 10);
159 return -EBUSY;
160}
161#else
162#define pcie_port_runtime_suspend NULL
163#define pcie_port_runtime_resume NULL
164#define pcie_port_runtime_idle NULL
165#endif
166
167static const struct dev_pm_ops pcie_portdrv_pm_ops = { 96static const struct dev_pm_ops pcie_portdrv_pm_ops = {
168 .suspend = pcie_port_device_suspend, 97 .suspend = pcie_port_device_suspend,
169 .resume = pcie_port_device_resume, 98 .resume = pcie_port_device_resume,
@@ -172,9 +101,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
172 .poweroff = pcie_port_device_suspend, 101 .poweroff = pcie_port_device_suspend,
173 .restore = pcie_port_device_resume, 102 .restore = pcie_port_device_resume,
174 .resume_noirq = pcie_port_resume_noirq, 103 .resume_noirq = pcie_port_resume_noirq,
175 .runtime_suspend = pcie_port_runtime_suspend,
176 .runtime_resume = pcie_port_runtime_resume,
177 .runtime_idle = pcie_port_runtime_idle,
178}; 104};
179 105
180#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) 106#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 636d1c9156a9..efa48dc0de3b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -6,6 +6,7 @@
6#include <linux/delay.h> 6#include <linux/delay.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/pci_hotplug.h>
9#include <linux/slab.h> 10#include <linux/slab.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/cpumask.h> 12#include <linux/cpumask.h>
@@ -744,6 +745,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
744} 745}
745EXPORT_SYMBOL(pci_add_new_bus); 746EXPORT_SYMBOL(pci_add_new_bus);
746 747
748static void pci_enable_crs(struct pci_dev *pdev)
749{
750 u16 root_cap = 0;
751
752 /* Enable CRS Software Visibility if supported */
753 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
754 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
755 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
756 PCI_EXP_RTCTL_CRSSVE);
757}
758
747/* 759/*
748 * If it's a bridge, configure it and scan the bus behind it. 760 * If it's a bridge, configure it and scan the bus behind it.
749 * For CardBus bridges, we don't scan behind as the devices will 761 * For CardBus bridges, we don't scan behind as the devices will
@@ -791,6 +803,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
791 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 803 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
792 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 804 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
793 805
806 pci_enable_crs(dev);
807
794 if ((secondary || subordinate) && !pcibios_assign_all_busses() && 808 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
795 !is_cardbus && !broken) { 809 !is_cardbus && !broken) {
796 unsigned int cmax; 810 unsigned int cmax;
@@ -1240,6 +1254,137 @@ int pci_setup_device(struct pci_dev *dev)
1240 return 0; 1254 return 0;
1241} 1255}
1242 1256
1257static struct hpp_type0 pci_default_type0 = {
1258 .revision = 1,
1259 .cache_line_size = 8,
1260 .latency_timer = 0x40,
1261 .enable_serr = 0,
1262 .enable_perr = 0,
1263};
1264
1265static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1266{
1267 u16 pci_cmd, pci_bctl;
1268
1269 if (!hpp)
1270 hpp = &pci_default_type0;
1271
1272 if (hpp->revision > 1) {
1273 dev_warn(&dev->dev,
1274 "PCI settings rev %d not supported; using defaults\n",
1275 hpp->revision);
1276 hpp = &pci_default_type0;
1277 }
1278
1279 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1280 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1281 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1282 if (hpp->enable_serr)
1283 pci_cmd |= PCI_COMMAND_SERR;
1284 if (hpp->enable_perr)
1285 pci_cmd |= PCI_COMMAND_PARITY;
1286 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1287
1288 /* Program bridge control value */
1289 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1290 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1291 hpp->latency_timer);
1292 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1293 if (hpp->enable_serr)
1294 pci_bctl |= PCI_BRIDGE_CTL_SERR;
1295 if (hpp->enable_perr)
1296 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1297 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1298 }
1299}
1300
1301static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1302{
1303 if (hpp)
1304 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1305}
1306
1307static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1308{
1309 int pos;
1310 u32 reg32;
1311
1312 if (!hpp)
1313 return;
1314
1315 if (hpp->revision > 1) {
1316 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1317 hpp->revision);
1318 return;
1319 }
1320
1321 /*
1322 * Don't allow _HPX to change MPS or MRRS settings. We manage
1323 * those to make sure they're consistent with the rest of the
1324 * platform.
1325 */
1326 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1327 PCI_EXP_DEVCTL_READRQ;
1328 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1329 PCI_EXP_DEVCTL_READRQ);
1330
1331 /* Initialize Device Control Register */
1332 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1333 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1334
1335 /* Initialize Link Control Register */
1336 if (dev->subordinate)
1337 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1338 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1339
1340 /* Find Advanced Error Reporting Enhanced Capability */
1341 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1342 if (!pos)
1343 return;
1344
1345 /* Initialize Uncorrectable Error Mask Register */
1346 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1347 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1348 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1349
1350 /* Initialize Uncorrectable Error Severity Register */
1351 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1352 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1353 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1354
1355 /* Initialize Correctable Error Mask Register */
1356 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1357 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1358 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1359
1360 /* Initialize Advanced Error Capabilities and Control Register */
1361 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1362 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1363 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1364
1365 /*
1366 * FIXME: The following two registers are not supported yet.
1367 *
1368 * o Secondary Uncorrectable Error Severity Register
1369 * o Secondary Uncorrectable Error Mask Register
1370 */
1371}
1372
1373static void pci_configure_device(struct pci_dev *dev)
1374{
1375 struct hotplug_params hpp;
1376 int ret;
1377
1378 memset(&hpp, 0, sizeof(hpp));
1379 ret = pci_get_hp_params(dev, &hpp);
1380 if (ret)
1381 return;
1382
1383 program_hpp_type2(dev, hpp.t2);
1384 program_hpp_type1(dev, hpp.t1);
1385 program_hpp_type0(dev, hpp.t0);
1386}
1387
1243static void pci_release_capabilities(struct pci_dev *dev) 1388static void pci_release_capabilities(struct pci_dev *dev)
1244{ 1389{
1245 pci_vpd_release(dev); 1390 pci_vpd_release(dev);
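_HPX Type 2 programs each register as an (AND mask, OR bits) pair, so forcing the PAYLOAD/READRQ bits into the AND mask and out of the OR bits makes _HPX transparent for exactly those fields. Stated on its own, the transform is:

    /* new = (old & mask_and) | mask_or
     * Keep a field:  set its bits in mask_and, clear them in mask_or.
     * Force a field: clear its bits in mask_and, put the value in mask_or.
     */
    static u16 hpx_apply(u16 old, u16 mask_and, u16 mask_or)
    {
    	return (old & mask_and) | mask_or;
    }

The code above passes ~mask_and because pcie_capability_clear_and_set_word() takes a clear mask rather than a keep mask.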
@@ -1296,8 +1441,13 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1296 *l == 0x0000ffff || *l == 0xffff0000) 1441 *l == 0x0000ffff || *l == 0xffff0000)
1297 return false; 1442 return false;
1298 1443
1299 /* Configuration request Retry Status */ 1444 /*
1300 while (*l == 0xffff0001) { 1445 * Configuration Request Retry Status. Some root ports return the
1446 * actual device ID instead of the synthetic ID (0xFFFF) required
1447 * by the PCIe spec. Ignore the device ID and only check for
1448 * (vendor id == 1).
1449 */
1450 while ((*l & 0xffff) == 0x0001) {
1301 if (!crs_timeout) 1451 if (!crs_timeout)
1302 return false; 1452 return false;
1303 1453
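The hunk shows only the new loop condition; a hedged reconstruction of the whole CRS wait (local names and back-off are illustrative, and the real function also logs before giving up):

    #include <linux/delay.h>
    #include <linux/pci.h>

    /* Poll a function that answers config reads with CRS: the low 16
     * bits of the Vendor ID dword read back as the synthetic 0x0001.
     */
    static bool wait_for_crs(struct pci_bus *bus, int devfn, u32 *l,
    			 int timeout_ms)
    {
    	int delay = 1;

    	while ((*l & 0xffff) == 0x0001) {
    		if (delay > timeout_ms)
    			return false;	/* device stuck in CRS */
    		msleep(delay);
    		delay *= 2;
    		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
    			return false;	/* config access failed */
    	}
    	return true;
    }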
@@ -1377,6 +1527,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1377{ 1527{
1378 int ret; 1528 int ret;
1379 1529
1530 pci_configure_device(dev);
1531
1380 device_initialize(&dev->dev); 1532 device_initialize(&dev->dev);
1381 dev->dev.release = pci_release_dev; 1533 dev->dev.release = pci_release_dev;
1382 1534
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 80c2d014283d..b6c65009e858 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/ktime.h> 26#include <linux/ktime.h>
27#include <linux/mm.h>
27#include <asm/dma.h> /* isa_dma_bridge_buggy */ 28#include <asm/dma.h> /* isa_dma_bridge_buggy */
28#include "pci.h" 29#include "pci.h"
29 30
@@ -287,6 +288,25 @@ static void quirk_citrine(struct pci_dev *dev)
287} 288}
288DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); 289DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
289 290
291/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
292static void quirk_extend_bar_to_page(struct pci_dev *dev)
293{
294 int i;
295
296 for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
297 struct resource *r = &dev->resource[i];
298
299 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
300 r->end = PAGE_SIZE - 1;
301 r->start = 0;
302 r->flags |= IORESOURCE_UNSET;
303 dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
304 i, r);
305 }
306 }
307}
308DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
309
290/* 310/*
291 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. 311 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
292 * If it's needed, re-allocate the region. 312 * If it's needed, re-allocate the region.
@@ -2985,6 +3005,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
2985 */ 3005 */
2986DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, 3006DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
2987 quirk_broken_intx_masking); 3007 quirk_broken_intx_masking);
3008DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3009 quirk_broken_intx_masking);
2988 3010
2989#ifdef CONFIG_ACPI 3011#ifdef CONFIG_ACPI
2990/* 3012/*
@@ -3512,57 +3534,6 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
3512/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ 3534/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
3513DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); 3535DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
3514 3536
3515static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
3516{
3517 if (!PCI_FUNC(dev->devfn))
3518 return pci_dev_get(dev);
3519
3520 return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
3521}
3522
3523static const struct pci_dev_dma_source {
3524 u16 vendor;
3525 u16 device;
3526 struct pci_dev *(*dma_source)(struct pci_dev *dev);
3527} pci_dev_dma_source[] = {
3528 /*
3529 * https://bugzilla.redhat.com/show_bug.cgi?id=605888
3530 *
3531 * Some Ricoh devices use the function 0 source ID for DMA on
3532 * other functions of a multifunction device. The DMA devices
3533 * is therefore function 0, which will have implications of the
3534 * iommu grouping of these devices.
3535 */
3536 { PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
3537 { PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
3538 { PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
3539 { PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
3540 { 0 }
3541};
3542
3543/*
3544 * IOMMUs with isolation capabilities need to be programmed with the
3545 * correct source ID of a device. In most cases, the source ID matches
3546 * the device doing the DMA, but sometimes hardware is broken and will
3547 * tag the DMA as being sourced from a different device. This function
3548 * allows that translation. Note that the reference count of the
3549 * returned device is incremented on all paths.
3550 */
3551struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
3552{
3553 const struct pci_dev_dma_source *i;
3554
3555 for (i = pci_dev_dma_source; i->dma_source; i++) {
3556 if ((i->vendor == dev->vendor ||
3557 i->vendor == (u16)PCI_ANY_ID) &&
3558 (i->device == dev->device ||
3559 i->device == (u16)PCI_ANY_ID))
3560 return i->dma_source(dev);
3561 }
3562
3563 return pci_dev_get(dev);
3564}
3565
3566/* 3537/*
3567 * AMD has indicated that the devices below do not support peer-to-peer 3538 * AMD has indicated that the devices below do not support peer-to-peer
3568 * in any system where they are found in the southbridge with an AMD 3539 * in any system where they are found in the southbridge with an AMD
@@ -3664,6 +3635,23 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
3664 return acs_flags & ~flags ? 0 : 1; 3635 return acs_flags & ~flags ? 0 : 1;
3665} 3636}
3666 3637
3638static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
3639{
3640 /*
3641 * SV, TB, and UF are not relevant to multifunction endpoints.
3642 *
3643 * Multifunction devices are only required to implement RR, CR, and DT
3644 * in their ACS capability if they support peer-to-peer transactions.
3645 * Devices matching this quirk have been verified by the vendor to not
3646 * perform peer-to-peer with other functions, allowing us to mask out
3647 * these bits as if they were unimplemented in the ACS capability.
3648 */
3649 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
3650 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
3651
3652 return acs_flags ? 0 : 1;
3653}
3654
3667static const struct pci_dev_acs_enabled { 3655static const struct pci_dev_acs_enabled {
3668 u16 vendor; 3656 u16 vendor;
3669 u16 device; 3657 u16 device;
@@ -3675,6 +3663,28 @@ static const struct pci_dev_acs_enabled {
3675 { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs }, 3663 { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
3676 { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs }, 3664 { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
3677 { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs }, 3665 { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
3666 { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
3667 { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
3668 { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
3669 { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
3670 { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
3671 { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
3672 { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
3673 { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
3674 { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
3675 { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
3676 { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
3677 { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
3678 { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
3679 { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
3680 { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
3681 { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
3682 { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
3683 { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
3684 { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
3685 { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
3686 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
3687 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
3678 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, 3688 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
3679 { 0 } 3689 { 0 }
3680}; 3690};
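The pci_dev_acs_enabled[] table extended above is consulted by pci_dev_specific_acs_enabled(), which sits just below the hunk; a sketch of that walk (hypothetical function name, same table semantics):

    static int lookup_acs_quirk(struct pci_dev *dev, u16 acs_flags)
    {
    	const struct pci_dev_acs_enabled *i;
    	int ret;

    	for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
    		if ((i->vendor == dev->vendor ||
    		     i->vendor == (u16)PCI_ANY_ID) &&
    		    (i->device == dev->device ||
    		     i->device == (u16)PCI_ANY_ID)) {
    			ret = i->acs_enabled(dev, acs_flags);
    			if (ret >= 0)
    				return ret;	/* 1 = isolated, 0 = not */
    		}
    	}
    	return -ENOTTY;		/* no quirk; fall back to the ACS capability */
    }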
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 827ad831f1dd..a81f413083e4 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -103,40 +103,6 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
103 return ret; 103 return ret;
104} 104}
105 105
106/*
107 * find the upstream PCIe-to-PCI bridge of a PCI device
108 * if the device is PCIE, return NULL
109 * if the device isn't connected to a PCIe bridge (that is its parent is a
110 * legacy PCI bridge and the bridge is directly connected to bus 0), return its
111 * parent
112 */
113struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
114{
115 struct pci_dev *tmp = NULL;
116
117 if (pci_is_pcie(pdev))
118 return NULL;
119 while (1) {
120 if (pci_is_root_bus(pdev->bus))
121 break;
122 pdev = pdev->bus->self;
123 /* a p2p bridge */
124 if (!pci_is_pcie(pdev)) {
125 tmp = pdev;
126 continue;
127 }
128 /* PCI device should connect to a PCIe bridge */
129 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) {
130 /* Busted hardware? */
131 WARN_ON_ONCE(1);
132 return NULL;
133 }
134 return pdev;
135 }
136
137 return tmp;
138}
139
140static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr) 106static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
141{ 107{
142 struct pci_bus *child; 108 struct pci_bus *child;
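Callers of the removed pci_find_upstream_pcie_bridge() are expected to move to pci_for_each_dma_alias(), whose body is the function kept at the top of this hunk. A hedged usage sketch (hypothetical callback):

    static int report_alias(struct pci_dev *pdev, u16 alias, void *opaque)
    {
    	dev_info(&pdev->dev, "requester ID %02x:%02x.%d\n",
    		 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
    		 PCI_FUNC(alias & 0xff));
    	return 0;	/* non-zero would abort the walk */
    }

    /* pci_for_each_dma_alias(pdev, report_alias, NULL); */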