aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt3
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi2
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c2
-rw-r--r--arch/x86/pci/mmconfig-shared.c7
-rw-r--r--arch/x86/pci/mrst.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c18
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/Makefile3
-rw-r--r--drivers/pci/host/pci-exynos.c530
-rw-r--r--drivers/pci/host/pci-mvebu.c7
-rw-r--r--drivers/pci/host/pcie-designware.c1011
-rw-r--r--drivers/pci/host/pcie-designware.h65
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c12
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c31
-rw-r--r--drivers/pci/iov.c23
-rw-r--r--drivers/pci/pci-sysfs.c32
-rw-r--r--drivers/pci/pci.c506
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c35
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c147
-rw-r--r--drivers/pci/setup-bus.c25
-rw-r--r--include/linux/pci.h6
-rw-r--r--include/linux/pci_hotplug.h4
28 files changed, 1593 insertions, 928 deletions
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index e2371f5cdebe..eabcb4b5db6e 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -18,6 +18,7 @@ Required properties:
18- interrupt-map-mask and interrupt-map: standard PCI properties 18- interrupt-map-mask and interrupt-map: standard PCI properties
19 to define the mapping of the PCIe interface to interrupt 19 to define the mapping of the PCIe interface to interrupt
20 numbers. 20 numbers.
21- num-lanes: number of lanes to use
21- reset-gpio: gpio pin number of power good signal 22- reset-gpio: gpio pin number of power good signal
22 23
23Example: 24Example:
@@ -41,6 +42,7 @@ SoC specific DT Entry:
41 #interrupt-cells = <1>; 42 #interrupt-cells = <1>;
42 interrupt-map-mask = <0 0 0 0>; 43 interrupt-map-mask = <0 0 0 0>;
43 interrupt-map = <0x0 0 &gic 53>; 44 interrupt-map = <0x0 0 &gic 53>;
45 num-lanes = <4>;
44 }; 46 };
45 47
46 pcie@2a0000 { 48 pcie@2a0000 {
@@ -60,6 +62,7 @@ SoC specific DT Entry:
60 #interrupt-cells = <1>; 62 #interrupt-cells = <1>;
61 interrupt-map-mask = <0 0 0 0>; 63 interrupt-map-mask = <0 0 0 0>;
62 interrupt-map = <0x0 0 &gic 56>; 64 interrupt-map = <0x0 0 &gic 56>;
65 num-lanes = <4>;
63 }; 66 };
64 67
65Board specific DT Entry: 68Board specific DT Entry:
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index ff7f5d855845..586134e2a382 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -248,6 +248,7 @@
248 #interrupt-cells = <1>; 248 #interrupt-cells = <1>;
249 interrupt-map-mask = <0 0 0 0>; 249 interrupt-map-mask = <0 0 0 0>;
250 interrupt-map = <0x0 0 &gic 53>; 250 interrupt-map = <0x0 0 &gic 53>;
251 num-lanes = <4>;
251 }; 252 };
252 253
253 pcie@2a0000 { 254 pcie@2a0000 {
@@ -267,5 +268,6 @@
267 #interrupt-cells = <1>; 268 #interrupt-cells = <1>;
268 interrupt-map-mask = <0 0 0 0>; 269 interrupt-map-mask = <0 0 0 0>;
269 interrupt-map = <0x0 0 &gic 56>; 270 interrupt-map = <0x0 0 &gic 56>;
271 num-lanes = <4>;
270 }; 272 };
271}; 273};
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index 0aa35f0eb0db..deb67843693c 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -320,7 +320,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
320 * are examined. 320 * are examined.
321 */ 321 */
322 322
323void __init pcibios_fixup_bus(struct pci_bus *bus) 323void pcibios_fixup_bus(struct pci_bus *bus)
324{ 324{
325#if 0 325#if 0
326 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); 326 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 082e88129712..5596c7bdd327 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) 700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
701 return -ENODEV; 701 return -ENODEV;
702 702
703 if (start > end) 703 if (start > end || !addr)
704 return -EINVAL; 704 return -EINVAL;
705 705
706 mutex_lock(&pci_mmcfg_lock); 706 mutex_lock(&pci_mmcfg_lock);
@@ -716,11 +716,6 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
716 return -EEXIST; 716 return -EEXIST;
717 } 717 }
718 718
719 if (!addr) {
720 mutex_unlock(&pci_mmcfg_lock);
721 return -EINVAL;
722 }
723
724 rc = -EBUSY; 719 rc = -EBUSY;
725 cfg = pci_mmconfig_alloc(seg, start, end, addr); 720 cfg = pci_mmconfig_alloc(seg, start, end, addr);
726 if (cfg == NULL) { 721 if (cfg == NULL) {
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 6eb18c42a28a..903fded50786 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -23,11 +23,11 @@
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/dmi.h> 25#include <linux/dmi.h>
26#include <linux/acpi.h>
27#include <linux/io.h>
28#include <linux/smp.h>
26 29
27#include <asm/acpi.h>
28#include <asm/segment.h> 30#include <asm/segment.h>
29#include <asm/io.h>
30#include <asm/smp.h>
31#include <asm/pci_x86.h> 31#include <asm/pci_x86.h>
32#include <asm/hw_irq.h> 32#include <asm/hw_irq.h>
33#include <asm/io_apic.h> 33#include <asm/io_apic.h>
@@ -43,7 +43,7 @@
43#define PCI_FIXED_BAR_4_SIZE 0x14 43#define PCI_FIXED_BAR_4_SIZE 0x14
44#define PCI_FIXED_BAR_5_SIZE 0x1c 44#define PCI_FIXED_BAR_5_SIZE 0x1c
45 45
46static int pci_soc_mode = 0; 46static int pci_soc_mode;
47 47
48/** 48/**
49 * fixed_bar_cap - return the offset of the fixed BAR cap if found 49 * fixed_bar_cap - return the offset of the fixed BAR cap if found
@@ -141,7 +141,8 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
141 */ 141 */
142static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) 142static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
143{ 143{
144 /* This is a workaround for A0 LNC bug where PCI status register does 144 /*
145 * This is a workaround for A0 LNC bug where PCI status register does
145 * not have new CAP bit set. can not be written by SW either. 146 * not have new CAP bit set. can not be written by SW either.
146 * 147 *
147 * PCI header type in real LNC indicates a single function device, this 148 * PCI header type in real LNC indicates a single function device, this
@@ -154,7 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
154 || devfn == PCI_DEVFN(0, 0) 155 || devfn == PCI_DEVFN(0, 0)
155 || devfn == PCI_DEVFN(3, 0))) 156 || devfn == PCI_DEVFN(3, 0)))
156 return 1; 157 return 1;
157 return 0; /* langwell on others */ 158 return 0; /* Langwell on others */
158} 159}
159 160
160static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, 161static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -172,7 +173,8 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
172{ 173{
173 int offset; 174 int offset;
174 175
175 /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read 176 /*
177 * On MRST, there is no PCI ROM BAR, this will cause a subsequent read
176 * to ROM BAR return 0 then being ignored. 178 * to ROM BAR return 0 then being ignored.
177 */ 179 */
178 if (where == PCI_ROM_ADDRESS) 180 if (where == PCI_ROM_ADDRESS)
@@ -210,7 +212,8 @@ static int mrst_pci_irq_enable(struct pci_dev *dev)
210 212
211 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 213 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
212 214
213 /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to 215 /*
216 * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
214 * IOAPIC RTE entries, so we just enable RTE for the device. 217 * IOAPIC RTE entries, so we just enable RTE for the device.
215 */ 218 */
216 irq_attr.ioapic = mp_find_ioapic(dev->irq); 219 irq_attr.ioapic = mp_find_ioapic(dev->irq);
@@ -235,7 +238,7 @@ struct pci_ops pci_mrst_ops = {
235 */ 238 */
236int __init pci_mrst_init(void) 239int __init pci_mrst_init(void)
237{ 240{
238 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n"); 241 pr_info("Intel MID platform detected, using MID PCI ops\n");
239 pci_mmcfg_late_init(); 242 pci_mmcfg_late_init();
240 pcibios_enable_irq = mrst_pci_irq_enable; 243 pcibios_enable_irq = mrst_pci_irq_enable;
241 pci_root_ops = pci_mrst_ops; 244 pci_root_ops = pci_mrst_ops;
@@ -244,17 +247,21 @@ int __init pci_mrst_init(void)
244 return 1; 247 return 1;
245} 248}
246 249
247/* Langwell devices are not true pci devices, they are not subject to 10 ms 250/*
248 * d3 to d0 delay required by pci spec. 251 * Langwell devices are not true PCI devices; they are not subject to 10 ms
252 * d3 to d0 delay required by PCI spec.
249 */ 253 */
250static void pci_d3delay_fixup(struct pci_dev *dev) 254static void pci_d3delay_fixup(struct pci_dev *dev)
251{ 255{
252 /* PCI fixups are effectively decided compile time. If we have a dual 256 /*
253 SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */ 257 * PCI fixups are effectively decided compile time. If we have a dual
254 if (!pci_soc_mode) 258 * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
255 return; 259 */
256 /* true pci devices in lincroft should allow type 1 access, the rest 260 if (!pci_soc_mode)
257 * are langwell fake pci devices. 261 return;
262 /*
263 * True PCI devices in Lincroft should allow type 1 access, the rest
264 * are Langwell fake PCI devices.
258 */ 265 */
259 if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID)) 266 if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
260 return; 267 return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e5da07858a2f..c51d2f82a93e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9935,8 +9935,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9935 9935
9936static int bnx2x_do_flr(struct bnx2x *bp) 9936static int bnx2x_do_flr(struct bnx2x *bp)
9937{ 9937{
9938 int i;
9939 u16 status;
9940 struct pci_dev *dev = bp->pdev; 9938 struct pci_dev *dev = bp->pdev;
9941 9939
9942 if (CHIP_IS_E1x(bp)) { 9940 if (CHIP_IS_E1x(bp)) {
@@ -9951,20 +9949,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
9951 return -EINVAL; 9949 return -EINVAL;
9952 } 9950 }
9953 9951
9954 /* Wait for Transaction Pending bit clean */ 9952 if (!pci_wait_for_pending_transaction(dev))
9955 for (i = 0; i < 4; i++) { 9953 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
9956 if (i)
9957 msleep((1 << (i - 1)) * 100);
9958
9959 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
9960 if (!(status & PCI_EXP_DEVSTA_TRPND))
9961 goto clear;
9962 }
9963
9964 dev_err(&dev->dev,
9965 "transaction is not cleared; proceeding with reset anyway\n");
9966
9967clear:
9968 9954
9969 BNX2X_DEV_INFO("Initiating FLR\n"); 9955 BNX2X_DEV_INFO("Initiating FLR\n");
9970 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 9956 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 1184ff6fe864..e5ba4eb4e5b3 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -4,6 +4,7 @@ menu "PCI host controller drivers"
4config PCI_MVEBU 4config PCI_MVEBU
5 bool "Marvell EBU PCIe controller" 5 bool "Marvell EBU PCIe controller"
6 depends on ARCH_MVEBU || ARCH_KIRKWOOD 6 depends on ARCH_MVEBU || ARCH_KIRKWOOD
7 depends on OF
7 8
8config PCIE_DW 9config PCIE_DW
9 bool 10 bool
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 086d8500e849..ab79ccb5bbff 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
2obj-$(CONFIG_PCIE_DW) += pcie-designware.o 1obj-$(CONFIG_PCIE_DW) += pcie-designware.o
2obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
3obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644
index 000000000000..012ca8aec71a
--- /dev/null
+++ b/drivers/pci/host/pci-exynos.c
@@ -0,0 +1,530 @@
1/*
2 * PCIe host controller driver for Samsung EXYNOS SoCs
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/gpio.h>
17#include <linux/interrupt.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/of_gpio.h>
21#include <linux/pci.h>
22#include <linux/platform_device.h>
23#include <linux/resource.h>
24#include <linux/signal.h>
25#include <linux/types.h>
26
27#include "pcie-designware.h"
28
29#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
30
31struct exynos_pcie {
32 void __iomem *elbi_base;
33 void __iomem *phy_base;
34 void __iomem *block_base;
35 int reset_gpio;
36 struct clk *clk;
37 struct clk *bus_clk;
38 struct pcie_port pp;
39};
40
41/* PCIe ELBI registers */
42#define PCIE_IRQ_PULSE 0x000
43#define IRQ_INTA_ASSERT (0x1 << 0)
44#define IRQ_INTB_ASSERT (0x1 << 2)
45#define IRQ_INTC_ASSERT (0x1 << 4)
46#define IRQ_INTD_ASSERT (0x1 << 6)
47#define PCIE_IRQ_LEVEL 0x004
48#define PCIE_IRQ_SPECIAL 0x008
49#define PCIE_IRQ_EN_PULSE 0x00c
50#define PCIE_IRQ_EN_LEVEL 0x010
51#define PCIE_IRQ_EN_SPECIAL 0x014
52#define PCIE_PWR_RESET 0x018
53#define PCIE_CORE_RESET 0x01c
54#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
55#define PCIE_STICKY_RESET 0x020
56#define PCIE_NONSTICKY_RESET 0x024
57#define PCIE_APP_INIT_RESET 0x028
58#define PCIE_APP_LTSSM_ENABLE 0x02c
59#define PCIE_ELBI_RDLH_LINKUP 0x064
60#define PCIE_ELBI_LTSSM_ENABLE 0x1
61#define PCIE_ELBI_SLV_AWMISC 0x11c
62#define PCIE_ELBI_SLV_ARMISC 0x120
63#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
64
65/* PCIe Purple registers */
66#define PCIE_PHY_GLOBAL_RESET 0x000
67#define PCIE_PHY_COMMON_RESET 0x004
68#define PCIE_PHY_CMN_REG 0x008
69#define PCIE_PHY_MAC_RESET 0x00c
70#define PCIE_PHY_PLL_LOCKED 0x010
71#define PCIE_PHY_TRSVREG_RESET 0x020
72#define PCIE_PHY_TRSV_RESET 0x024
73
74/* PCIe PHY registers */
75#define PCIE_PHY_IMPEDANCE 0x004
76#define PCIE_PHY_PLL_DIV_0 0x008
77#define PCIE_PHY_PLL_BIAS 0x00c
78#define PCIE_PHY_DCC_FEEDBACK 0x014
79#define PCIE_PHY_PLL_DIV_1 0x05c
80#define PCIE_PHY_TRSV0_EMP_LVL 0x084
81#define PCIE_PHY_TRSV0_DRV_LVL 0x088
82#define PCIE_PHY_TRSV0_RXCDR 0x0ac
83#define PCIE_PHY_TRSV0_LVCC 0x0dc
84#define PCIE_PHY_TRSV1_EMP_LVL 0x144
85#define PCIE_PHY_TRSV1_RXCDR 0x16c
86#define PCIE_PHY_TRSV1_LVCC 0x19c
87#define PCIE_PHY_TRSV2_EMP_LVL 0x204
88#define PCIE_PHY_TRSV2_RXCDR 0x22c
89#define PCIE_PHY_TRSV2_LVCC 0x25c
90#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
91#define PCIE_PHY_TRSV3_RXCDR 0x2ec
92#define PCIE_PHY_TRSV3_LVCC 0x31c
93
94static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
95{
96 u32 val;
97 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
98
99 if (on) {
100 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC);
101 val |= PCIE_ELBI_SLV_DBI_ENABLE;
102 writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC);
103 } else {
104 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC);
105 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
106 writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC);
107 }
108}
109
110static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
111{
112 u32 val;
113 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
114
115 if (on) {
116 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC);
117 val |= PCIE_ELBI_SLV_DBI_ENABLE;
118 writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC);
119 } else {
120 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC);
121 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
122 writel(val, exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC);
123 }
124}
125
126static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
127{
128 u32 val;
129 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
130 void __iomem *elbi_base = exynos_pcie->elbi_base;
131
132 val = readl(elbi_base + PCIE_CORE_RESET);
133 val &= ~PCIE_CORE_RESET_ENABLE;
134 writel(val, elbi_base + PCIE_CORE_RESET);
135 writel(0, elbi_base + PCIE_PWR_RESET);
136 writel(0, elbi_base + PCIE_STICKY_RESET);
137 writel(0, elbi_base + PCIE_NONSTICKY_RESET);
138}
139
140static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
141{
142 u32 val;
143 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
144 void __iomem *elbi_base = exynos_pcie->elbi_base;
145 void __iomem *block_base = exynos_pcie->block_base;
146
147 val = readl(elbi_base + PCIE_CORE_RESET);
148 val |= PCIE_CORE_RESET_ENABLE;
149 writel(val, elbi_base + PCIE_CORE_RESET);
150 writel(1, elbi_base + PCIE_STICKY_RESET);
151 writel(1, elbi_base + PCIE_NONSTICKY_RESET);
152 writel(1, elbi_base + PCIE_APP_INIT_RESET);
153 writel(0, elbi_base + PCIE_APP_INIT_RESET);
154 writel(1, block_base + PCIE_PHY_MAC_RESET);
155}
156
157static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
158{
159 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
160 void __iomem *block_base = exynos_pcie->block_base;
161
162 writel(0, block_base + PCIE_PHY_MAC_RESET);
163 writel(1, block_base + PCIE_PHY_GLOBAL_RESET);
164}
165
166static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
167{
168 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
169 void __iomem *elbi_base = exynos_pcie->elbi_base;
170 void __iomem *block_base = exynos_pcie->block_base;
171
172 writel(0, block_base + PCIE_PHY_GLOBAL_RESET);
173 writel(1, elbi_base + PCIE_PWR_RESET);
174 writel(0, block_base + PCIE_PHY_COMMON_RESET);
175 writel(0, block_base + PCIE_PHY_CMN_REG);
176 writel(0, block_base + PCIE_PHY_TRSVREG_RESET);
177 writel(0, block_base + PCIE_PHY_TRSV_RESET);
178}
179
180static void exynos_pcie_init_phy(struct pcie_port *pp)
181{
182 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
183 void __iomem *phy_base = exynos_pcie->phy_base;
184
185 /* DCC feedback control off */
186 writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK);
187
188 /* set TX/RX impedance */
189 writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE);
190
191 /* set 50Mhz PHY clock */
192 writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0);
193 writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1);
194
195 /* set TX Differential output for lane 0 */
196 writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL);
197
198 /* set TX Pre-emphasis Level Control for lane 0 to minimum */
199 writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
200
201 /* set RX clock and data recovery bandwidth */
202 writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS);
203 writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR);
204 writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR);
205 writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR);
206 writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR);
207
208 /* change TX Pre-emphasis Level Control for lanes */
209 writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
210 writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL);
211 writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL);
212 writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL);
213
214 /* set LVCC */
215 writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC);
216 writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC);
217 writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC);
218 writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC);
219}
220
221static void exynos_pcie_assert_reset(struct pcie_port *pp)
222{
223 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
224
225 if (exynos_pcie->reset_gpio >= 0)
226 devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
227 GPIOF_OUT_INIT_HIGH, "RESET");
228 return;
229}
230
231static int exynos_pcie_establish_link(struct pcie_port *pp)
232{
233 u32 val;
234 int count = 0;
235 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
236 void __iomem *elbi_base = exynos_pcie->elbi_base;
237 void __iomem *block_base = exynos_pcie->block_base;
238 void __iomem *phy_base = exynos_pcie->phy_base;
239
240 if (dw_pcie_link_up(pp)) {
241 dev_err(pp->dev, "Link already up\n");
242 return 0;
243 }
244
245 /* assert reset signals */
246 exynos_pcie_assert_core_reset(pp);
247 exynos_pcie_assert_phy_reset(pp);
248
249 /* de-assert phy reset */
250 exynos_pcie_deassert_phy_reset(pp);
251
252 /* initialize phy */
253 exynos_pcie_init_phy(pp);
254
255 /* pulse for common reset */
256 writel(1, block_base + PCIE_PHY_COMMON_RESET);
257 udelay(500);
258 writel(0, block_base + PCIE_PHY_COMMON_RESET);
259
260 /* de-assert core reset */
261 exynos_pcie_deassert_core_reset(pp);
262
263 /* setup root complex */
264 dw_pcie_setup_rc(pp);
265
266 /* assert reset signal */
267 exynos_pcie_assert_reset(pp);
268
269 /* assert LTSSM enable */
270 writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE);
271
272 /* check if the link is up or not */
273 while (!dw_pcie_link_up(pp)) {
274 mdelay(100);
275 count++;
276 if (count == 10) {
277 while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) {
278 val = readl(block_base + PCIE_PHY_PLL_LOCKED);
279 dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
280 }
281 dev_err(pp->dev, "PCIe Link Fail\n");
282 return -EINVAL;
283 }
284 }
285
286 dev_info(pp->dev, "Link up\n");
287
288 return 0;
289}
290
291static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
292{
293 u32 val;
294 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
295 void __iomem *elbi_base = exynos_pcie->elbi_base;
296
297 val = readl(elbi_base + PCIE_IRQ_PULSE);
298 writel(val, elbi_base + PCIE_IRQ_PULSE);
299 return;
300}
301
302static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
303{
304 u32 val;
305 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
306 void __iomem *elbi_base = exynos_pcie->elbi_base;
307
308 /* enable INTX interrupt */
309 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
310 IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
311 writel(val, elbi_base + PCIE_IRQ_EN_PULSE);
312 return;
313}
314
315static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
316{
317 struct pcie_port *pp = arg;
318
319 exynos_pcie_clear_irq_pulse(pp);
320 return IRQ_HANDLED;
321}
322
323static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
324{
325 exynos_pcie_enable_irq_pulse(pp);
326 return;
327}
328
329static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
330 void __iomem *dbi_base, u32 *val)
331{
332 exynos_pcie_sideband_dbi_r_mode(pp, true);
333 *val = readl(dbi_base);
334 exynos_pcie_sideband_dbi_r_mode(pp, false);
335 return;
336}
337
338static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
339 u32 val, void __iomem *dbi_base)
340{
341 exynos_pcie_sideband_dbi_w_mode(pp, true);
342 writel(val, dbi_base);
343 exynos_pcie_sideband_dbi_w_mode(pp, false);
344 return;
345}
346
347static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
348 u32 *val)
349{
350 int ret;
351
352 exynos_pcie_sideband_dbi_r_mode(pp, true);
353 ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
354 exynos_pcie_sideband_dbi_r_mode(pp, false);
355 return ret;
356}
357
358static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
359 u32 val)
360{
361 int ret;
362
363 exynos_pcie_sideband_dbi_w_mode(pp, true);
364 ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
365 exynos_pcie_sideband_dbi_w_mode(pp, false);
366 return ret;
367}
368
369static int exynos_pcie_link_up(struct pcie_port *pp)
370{
371 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
372 u32 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_RDLH_LINKUP);
373
374 if (val == PCIE_ELBI_LTSSM_ENABLE)
375 return 1;
376
377 return 0;
378}
379
380static void exynos_pcie_host_init(struct pcie_port *pp)
381{
382 exynos_pcie_establish_link(pp);
383 exynos_pcie_enable_interrupts(pp);
384}
385
386static struct pcie_host_ops exynos_pcie_host_ops = {
387 .readl_rc = exynos_pcie_readl_rc,
388 .writel_rc = exynos_pcie_writel_rc,
389 .rd_own_conf = exynos_pcie_rd_own_conf,
390 .wr_own_conf = exynos_pcie_wr_own_conf,
391 .link_up = exynos_pcie_link_up,
392 .host_init = exynos_pcie_host_init,
393};
394
395static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
396{
397 int ret;
398
399 pp->irq = platform_get_irq(pdev, 1);
400 if (!pp->irq) {
401 dev_err(&pdev->dev, "failed to get irq\n");
402 return -ENODEV;
403 }
404 ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
405 IRQF_SHARED, "exynos-pcie", pp);
406 if (ret) {
407 dev_err(&pdev->dev, "failed to request irq\n");
408 return ret;
409 }
410
411 pp->root_bus_nr = -1;
412 pp->ops = &exynos_pcie_host_ops;
413
414 spin_lock_init(&pp->conf_lock);
415 ret = dw_pcie_host_init(pp);
416 if (ret) {
417 dev_err(&pdev->dev, "failed to initialize host\n");
418 return ret;
419 }
420
421 return 0;
422}
423
424static int __init exynos_pcie_probe(struct platform_device *pdev)
425{
426 struct exynos_pcie *exynos_pcie;
427 struct pcie_port *pp;
428 struct device_node *np = pdev->dev.of_node;
429 struct resource *elbi_base;
430 struct resource *phy_base;
431 struct resource *block_base;
432 int ret;
433
434 exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
435 GFP_KERNEL);
436 if (!exynos_pcie) {
437 dev_err(&pdev->dev, "no memory for exynos pcie\n");
438 return -ENOMEM;
439 }
440
441 pp = &exynos_pcie->pp;
442
443 pp->dev = &pdev->dev;
444
445 exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
446
447 exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
448 if (IS_ERR(exynos_pcie->clk)) {
449 dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
450 return PTR_ERR(exynos_pcie->clk);
451 }
452 ret = clk_prepare_enable(exynos_pcie->clk);
453 if (ret)
454 return ret;
455
456 exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
457 if (IS_ERR(exynos_pcie->bus_clk)) {
458 dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
459 ret = PTR_ERR(exynos_pcie->bus_clk);
460 goto fail_clk;
461 }
462 ret = clk_prepare_enable(exynos_pcie->bus_clk);
463 if (ret)
464 goto fail_clk;
465
466 elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
467 exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
468 if (IS_ERR(exynos_pcie->elbi_base))
469 return PTR_ERR(exynos_pcie->elbi_base);
470
471 phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
472 exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
473 if (IS_ERR(exynos_pcie->phy_base))
474 return PTR_ERR(exynos_pcie->phy_base);
475
476 block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
477 exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
478 if (IS_ERR(exynos_pcie->block_base))
479 return PTR_ERR(exynos_pcie->block_base);
480
481 ret = add_pcie_port(pp, pdev);
482 if (ret < 0)
483 goto fail_bus_clk;
484
485 platform_set_drvdata(pdev, exynos_pcie);
486 return 0;
487
488fail_bus_clk:
489 clk_disable_unprepare(exynos_pcie->bus_clk);
490fail_clk:
491 clk_disable_unprepare(exynos_pcie->clk);
492 return ret;
493}
494
495static int __exit exynos_pcie_remove(struct platform_device *pdev)
496{
497 struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);
498
499 clk_disable_unprepare(exynos_pcie->bus_clk);
500 clk_disable_unprepare(exynos_pcie->clk);
501
502 return 0;
503}
504
505static const struct of_device_id exynos_pcie_of_match[] = {
506 { .compatible = "samsung,exynos5440-pcie", },
507 {},
508};
509MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
510
511static struct platform_driver exynos_pcie_driver = {
512 .remove = __exit_p(exynos_pcie_remove),
513 .driver = {
514 .name = "exynos-pcie",
515 .owner = THIS_MODULE,
516 .of_match_table = of_match_ptr(exynos_pcie_of_match),
517 },
518};
519
520/* Exynos PCIe driver does not allow module unload */
521
522static int __init pcie_init(void)
523{
524 return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
525}
526subsys_initcall(pcie_init);
527
528MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
529MODULE_DESCRIPTION("Samsung PCIe host controller driver");
530MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13a633b1612e..8fc2a8241fa8 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -750,9 +750,9 @@ mvebu_pcie_map_registers(struct platform_device *pdev,
750 750
751 ret = of_address_to_resource(np, 0, &regs); 751 ret = of_address_to_resource(np, 0, &regs);
752 if (ret) 752 if (ret)
753 return NULL; 753 return ERR_PTR(ret);
754 754
755 return devm_request_and_ioremap(&pdev->dev, &regs); 755 return devm_ioremap_resource(&pdev->dev, &regs);
756} 756}
757 757
758static int __init mvebu_pcie_probe(struct platform_device *pdev) 758static int __init mvebu_pcie_probe(struct platform_device *pdev)
@@ -842,9 +842,10 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
842 continue; 842 continue;
843 843
844 port->base = mvebu_pcie_map_registers(pdev, child, port); 844 port->base = mvebu_pcie_map_registers(pdev, child, port);
845 if (!port->base) { 845 if (IS_ERR(port->base)) {
846 dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", 846 dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
847 port->port, port->lane); 847 port->port, port->lane);
848 port->base = NULL;
848 continue; 849 continue;
849 } 850 }
850 851
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 26bdbda8ff90..77b0c257f215 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PCIe host controller driver for Samsung EXYNOS SoCs 2 * Synopsys Designware PCIe host controller driver
3 * 3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd. 4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com 5 * http://www.samsung.com
@@ -11,74 +11,28 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/gpio.h>
17#include <linux/interrupt.h>
18#include <linux/kernel.h> 14#include <linux/kernel.h>
19#include <linux/list.h>
20#include <linux/module.h> 15#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/of_address.h> 16#include <linux/of_address.h>
23#include <linux/of_gpio.h>
24#include <linux/of_pci.h>
25#include <linux/pci.h> 17#include <linux/pci.h>
26#include <linux/pci_regs.h> 18#include <linux/pci_regs.h>
27#include <linux/platform_device.h>
28#include <linux/resource.h>
29#include <linux/signal.h>
30#include <linux/slab.h>
31#include <linux/types.h> 19#include <linux/types.h>
32 20
33struct pcie_port_info { 21#include "pcie-designware.h"
34 u32 cfg0_size;
35 u32 cfg1_size;
36 u32 io_size;
37 u32 mem_size;
38 phys_addr_t io_bus_addr;
39 phys_addr_t mem_bus_addr;
40};
41
42struct pcie_port {
43 struct device *dev;
44 u8 controller;
45 u8 root_bus_nr;
46 void __iomem *dbi_base;
47 void __iomem *elbi_base;
48 void __iomem *phy_base;
49 void __iomem *purple_base;
50 u64 cfg0_base;
51 void __iomem *va_cfg0_base;
52 u64 cfg1_base;
53 void __iomem *va_cfg1_base;
54 u64 io_base;
55 u64 mem_base;
56 spinlock_t conf_lock;
57 struct resource cfg;
58 struct resource io;
59 struct resource mem;
60 struct pcie_port_info config;
61 struct clk *clk;
62 struct clk *bus_clk;
63 int irq;
64 int reset_gpio;
65};
66
67/*
68 * Exynos PCIe IP consists of Synopsys specific part and Exynos
69 * specific part. Only core block is a Synopsys designware part;
70 * other parts are Exynos specific.
71 */
72 22
73/* Synopsis specific PCIE configuration registers */ 23/* Synopsis specific PCIE configuration registers */
74#define PCIE_PORT_LINK_CONTROL 0x710 24#define PCIE_PORT_LINK_CONTROL 0x710
75#define PORT_LINK_MODE_MASK (0x3f << 16) 25#define PORT_LINK_MODE_MASK (0x3f << 16)
26#define PORT_LINK_MODE_1_LANES (0x1 << 16)
27#define PORT_LINK_MODE_2_LANES (0x3 << 16)
76#define PORT_LINK_MODE_4_LANES (0x7 << 16) 28#define PORT_LINK_MODE_4_LANES (0x7 << 16)
77 29
78#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C 30#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
79#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) 31#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
80#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) 32#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
81#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x7 << 8) 33#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
34#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
35#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
82 36
83#define PCIE_MSI_ADDR_LO 0x820 37#define PCIE_MSI_ADDR_LO 0x820
84#define PCIE_MSI_ADDR_HI 0x824 38#define PCIE_MSI_ADDR_HI 0x824
@@ -108,69 +62,16 @@ struct pcie_port {
108#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) 62#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
109#define PCIE_ATU_UPPER_TARGET 0x91C 63#define PCIE_ATU_UPPER_TARGET 0x91C
110 64
111/* Exynos specific PCIE configuration registers */ 65static struct hw_pci dw_pci;
112 66
113/* PCIe ELBI registers */ 67unsigned long global_io_offset;
114#define PCIE_IRQ_PULSE 0x000
115#define IRQ_INTA_ASSERT (0x1 << 0)
116#define IRQ_INTB_ASSERT (0x1 << 2)
117#define IRQ_INTC_ASSERT (0x1 << 4)
118#define IRQ_INTD_ASSERT (0x1 << 6)
119#define PCIE_IRQ_LEVEL 0x004
120#define PCIE_IRQ_SPECIAL 0x008
121#define PCIE_IRQ_EN_PULSE 0x00c
122#define PCIE_IRQ_EN_LEVEL 0x010
123#define PCIE_IRQ_EN_SPECIAL 0x014
124#define PCIE_PWR_RESET 0x018
125#define PCIE_CORE_RESET 0x01c
126#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
127#define PCIE_STICKY_RESET 0x020
128#define PCIE_NONSTICKY_RESET 0x024
129#define PCIE_APP_INIT_RESET 0x028
130#define PCIE_APP_LTSSM_ENABLE 0x02c
131#define PCIE_ELBI_RDLH_LINKUP 0x064
132#define PCIE_ELBI_LTSSM_ENABLE 0x1
133#define PCIE_ELBI_SLV_AWMISC 0x11c
134#define PCIE_ELBI_SLV_ARMISC 0x120
135#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
136
137/* PCIe Purple registers */
138#define PCIE_PHY_GLOBAL_RESET 0x000
139#define PCIE_PHY_COMMON_RESET 0x004
140#define PCIE_PHY_CMN_REG 0x008
141#define PCIE_PHY_MAC_RESET 0x00c
142#define PCIE_PHY_PLL_LOCKED 0x010
143#define PCIE_PHY_TRSVREG_RESET 0x020
144#define PCIE_PHY_TRSV_RESET 0x024
145
146/* PCIe PHY registers */
147#define PCIE_PHY_IMPEDANCE 0x004
148#define PCIE_PHY_PLL_DIV_0 0x008
149#define PCIE_PHY_PLL_BIAS 0x00c
150#define PCIE_PHY_DCC_FEEDBACK 0x014
151#define PCIE_PHY_PLL_DIV_1 0x05c
152#define PCIE_PHY_TRSV0_EMP_LVL 0x084
153#define PCIE_PHY_TRSV0_DRV_LVL 0x088
154#define PCIE_PHY_TRSV0_RXCDR 0x0ac
155#define PCIE_PHY_TRSV0_LVCC 0x0dc
156#define PCIE_PHY_TRSV1_EMP_LVL 0x144
157#define PCIE_PHY_TRSV1_RXCDR 0x16c
158#define PCIE_PHY_TRSV1_LVCC 0x19c
159#define PCIE_PHY_TRSV2_EMP_LVL 0x204
160#define PCIE_PHY_TRSV2_RXCDR 0x22c
161#define PCIE_PHY_TRSV2_LVCC 0x25c
162#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
163#define PCIE_PHY_TRSV3_RXCDR 0x2ec
164#define PCIE_PHY_TRSV3_LVCC 0x31c
165
166static struct hw_pci exynos_pci;
167 68
168static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) 69static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
169{ 70{
170 return sys->private_data; 71 return sys->private_data;
171} 72}
172 73
173static inline int cfg_read(void *addr, int where, int size, u32 *val) 74int cfg_read(void __iomem *addr, int where, int size, u32 *val)
174{ 75{
175 *val = readl(addr); 76 *val = readl(addr);
176 77
@@ -184,7 +85,7 @@ static inline int cfg_read(void *addr, int where, int size, u32 *val)
184 return PCIBIOS_SUCCESSFUL; 85 return PCIBIOS_SUCCESSFUL;
185} 86}
186 87
187static inline int cfg_write(void *addr, int where, int size, u32 val) 88int cfg_write(void __iomem *addr, int where, int size, u32 val)
188{ 89{
189 if (size == 4) 90 if (size == 4)
190 writel(val, addr); 91 writel(val, addr);
@@ -198,155 +99,241 @@ static inline int cfg_write(void *addr, int where, int size, u32 val)
198 return PCIBIOS_SUCCESSFUL; 99 return PCIBIOS_SUCCESSFUL;
199} 100}
200 101
201static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on) 102static inline void dw_pcie_readl_rc(struct pcie_port *pp,
103 void __iomem *dbi_addr, u32 *val)
202{ 104{
203 u32 val; 105 if (pp->ops->readl_rc)
204 106 pp->ops->readl_rc(pp, dbi_addr, val);
205 if (on) { 107 else
206 val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC); 108 *val = readl(dbi_addr);
207 val |= PCIE_ELBI_SLV_DBI_ENABLE;
208 writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
209 } else {
210 val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
211 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
212 writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
213 }
214}
215
216static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
217{
218 u32 val;
219
220 if (on) {
221 val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
222 val |= PCIE_ELBI_SLV_DBI_ENABLE;
223 writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
224 } else {
225 val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
226 val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
227 writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
228 }
229}
230
231static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val)
232{
233 exynos_pcie_sideband_dbi_r_mode(pp, true);
234 *val = readl(dbi_base);
235 exynos_pcie_sideband_dbi_r_mode(pp, false);
236 return;
237} 109}
238 110
239static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base) 111static inline void dw_pcie_writel_rc(struct pcie_port *pp,
112 u32 val, void __iomem *dbi_addr)
240{ 113{
241 exynos_pcie_sideband_dbi_w_mode(pp, true); 114 if (pp->ops->writel_rc)
242 writel(val, dbi_base); 115 pp->ops->writel_rc(pp, val, dbi_addr);
243 exynos_pcie_sideband_dbi_w_mode(pp, false); 116 else
244 return; 117 writel(val, dbi_addr);
245} 118}
246 119
247static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, 120int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
248 u32 *val) 121 u32 *val)
249{ 122{
250 int ret; 123 int ret;
251 124
252 exynos_pcie_sideband_dbi_r_mode(pp, true); 125 if (pp->ops->rd_own_conf)
253 ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); 126 ret = pp->ops->rd_own_conf(pp, where, size, val);
254 exynos_pcie_sideband_dbi_r_mode(pp, false); 127 else
128 ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
129
255 return ret; 130 return ret;
256} 131}
257 132
258static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, 133int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
259 u32 val) 134 u32 val)
260{ 135{
261 int ret; 136 int ret;
262 137
263 exynos_pcie_sideband_dbi_w_mode(pp, true); 138 if (pp->ops->wr_own_conf)
264 ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val); 139 ret = pp->ops->wr_own_conf(pp, where, size, val);
265 exynos_pcie_sideband_dbi_w_mode(pp, false); 140 else
141 ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size,
142 val);
143
266 return ret; 144 return ret;
267} 145}
268 146
269static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) 147int dw_pcie_link_up(struct pcie_port *pp)
148{
149 if (pp->ops->link_up)
150 return pp->ops->link_up(pp);
151 else
152 return 0;
153}
154
155int __init dw_pcie_host_init(struct pcie_port *pp)
156{
157 struct device_node *np = pp->dev->of_node;
158 struct of_pci_range range;
159 struct of_pci_range_parser parser;
160 u32 val;
161
162 if (of_pci_range_parser_init(&parser, np)) {
163 dev_err(pp->dev, "missing ranges property\n");
164 return -EINVAL;
165 }
166
167 /* Get the I/O and memory ranges from DT */
168 for_each_of_pci_range(&parser, &range) {
169 unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
170 if (restype == IORESOURCE_IO) {
171 of_pci_range_to_resource(&range, np, &pp->io);
172 pp->io.name = "I/O";
173 pp->io.start = max_t(resource_size_t,
174 PCIBIOS_MIN_IO,
175 range.pci_addr + global_io_offset);
176 pp->io.end = min_t(resource_size_t,
177 IO_SPACE_LIMIT,
178 range.pci_addr + range.size
179 + global_io_offset);
180 pp->config.io_size = resource_size(&pp->io);
181 pp->config.io_bus_addr = range.pci_addr;
182 }
183 if (restype == IORESOURCE_MEM) {
184 of_pci_range_to_resource(&range, np, &pp->mem);
185 pp->mem.name = "MEM";
186 pp->config.mem_size = resource_size(&pp->mem);
187 pp->config.mem_bus_addr = range.pci_addr;
188 }
189 if (restype == 0) {
190 of_pci_range_to_resource(&range, np, &pp->cfg);
191 pp->config.cfg0_size = resource_size(&pp->cfg)/2;
192 pp->config.cfg1_size = resource_size(&pp->cfg)/2;
193 }
194 }
195
196 if (!pp->dbi_base) {
197 pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
198 resource_size(&pp->cfg));
199 if (!pp->dbi_base) {
200 dev_err(pp->dev, "error with ioremap\n");
201 return -ENOMEM;
202 }
203 }
204
205 pp->cfg0_base = pp->cfg.start;
206 pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
207 pp->io_base = pp->io.start;
208 pp->mem_base = pp->mem.start;
209
210 pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
211 pp->config.cfg0_size);
212 if (!pp->va_cfg0_base) {
213 dev_err(pp->dev, "error with ioremap in function\n");
214 return -ENOMEM;
215 }
216 pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
217 pp->config.cfg1_size);
218 if (!pp->va_cfg1_base) {
219 dev_err(pp->dev, "error with ioremap\n");
220 return -ENOMEM;
221 }
222
223 if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
224 dev_err(pp->dev, "Failed to parse the number of lanes\n");
225 return -EINVAL;
226 }
227
228 if (pp->ops->host_init)
229 pp->ops->host_init(pp);
230
231 dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
232
233 /* program correct class for RC */
234 dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
235
236 dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
237 val |= PORT_LOGIC_SPEED_CHANGE;
238 dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
239
240 dw_pci.nr_controllers = 1;
241 dw_pci.private_data = (void **)&pp;
242
243 pci_common_init(&dw_pci);
244 pci_assign_unassigned_resources();
245#ifdef CONFIG_PCI_DOMAINS
246 dw_pci.domain++;
247#endif
248
249 return 0;
250}
251
252static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
270{ 253{
271 u32 val; 254 u32 val;
272 void __iomem *dbi_base = pp->dbi_base; 255 void __iomem *dbi_base = pp->dbi_base;
273 256
274 /* Program viewport 0 : OUTBOUND : CFG0 */ 257 /* Program viewport 0 : OUTBOUND : CFG0 */
275 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; 258 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
276 writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); 259 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
277 writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE); 260 dw_pcie_writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE);
278 writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); 261 dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32),
279 writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, 262 dbi_base + PCIE_ATU_UPPER_BASE);
263 dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
280 dbi_base + PCIE_ATU_LIMIT); 264 dbi_base + PCIE_ATU_LIMIT);
281 writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); 265 dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
282 writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); 266 dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
283 writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1); 267 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1);
284 val = PCIE_ATU_ENABLE; 268 val = PCIE_ATU_ENABLE;
285 writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); 269 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
286} 270}
287 271
288static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) 272static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
289{ 273{
290 u32 val; 274 u32 val;
291 void __iomem *dbi_base = pp->dbi_base; 275 void __iomem *dbi_base = pp->dbi_base;
292 276
293 /* Program viewport 1 : OUTBOUND : CFG1 */ 277 /* Program viewport 1 : OUTBOUND : CFG1 */
294 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; 278 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
295 writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); 279 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
296 writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1); 280 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1);
297 val = PCIE_ATU_ENABLE; 281 val = PCIE_ATU_ENABLE;
298 writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); 282 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
299 writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE); 283 dw_pcie_writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE);
300 writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); 284 dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32),
301 writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, 285 dbi_base + PCIE_ATU_UPPER_BASE);
286 dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
302 dbi_base + PCIE_ATU_LIMIT); 287 dbi_base + PCIE_ATU_LIMIT);
303 writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET); 288 dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
304 writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET); 289 dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
305} 290}
306 291
307static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) 292static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
308{ 293{
309 u32 val; 294 u32 val;
310 void __iomem *dbi_base = pp->dbi_base; 295 void __iomem *dbi_base = pp->dbi_base;
311 296
312 /* Program viewport 0 : OUTBOUND : MEM */ 297 /* Program viewport 0 : OUTBOUND : MEM */
313 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0; 298 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
314 writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); 299 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
315 writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1); 300 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1);
316 val = PCIE_ATU_ENABLE; 301 val = PCIE_ATU_ENABLE;
317 writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); 302 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
318 writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE); 303 dw_pcie_writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE);
319 writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); 304 dw_pcie_writel_rc(pp, (pp->mem_base >> 32),
320 writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, 305 dbi_base + PCIE_ATU_UPPER_BASE);
306 dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
321 dbi_base + PCIE_ATU_LIMIT); 307 dbi_base + PCIE_ATU_LIMIT);
322 writel_rc(pp, pp->config.mem_bus_addr, 308 dw_pcie_writel_rc(pp, pp->config.mem_bus_addr,
323 dbi_base + PCIE_ATU_LOWER_TARGET); 309 dbi_base + PCIE_ATU_LOWER_TARGET);
324 writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), 310 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
325 dbi_base + PCIE_ATU_UPPER_TARGET); 311 dbi_base + PCIE_ATU_UPPER_TARGET);
326} 312}
327 313
328static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp) 314static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
329{ 315{
330 u32 val; 316 u32 val;
331 void __iomem *dbi_base = pp->dbi_base; 317 void __iomem *dbi_base = pp->dbi_base;
332 318
333 /* Program viewport 1 : OUTBOUND : IO */ 319 /* Program viewport 1 : OUTBOUND : IO */
334 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1; 320 val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
335 writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT); 321 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
336 writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1); 322 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1);
337 val = PCIE_ATU_ENABLE; 323 val = PCIE_ATU_ENABLE;
338 writel_rc(pp, val, dbi_base + PCIE_ATU_CR2); 324 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
339 writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE); 325 dw_pcie_writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE);
340 writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE); 326 dw_pcie_writel_rc(pp, (pp->io_base >> 32),
341 writel_rc(pp, pp->io_base + pp->config.io_size - 1, 327 dbi_base + PCIE_ATU_UPPER_BASE);
328 dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
342 dbi_base + PCIE_ATU_LIMIT); 329 dbi_base + PCIE_ATU_LIMIT);
343 writel_rc(pp, pp->config.io_bus_addr, 330 dw_pcie_writel_rc(pp, pp->config.io_bus_addr,
344 dbi_base + PCIE_ATU_LOWER_TARGET); 331 dbi_base + PCIE_ATU_LOWER_TARGET);
345 writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), 332 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
346 dbi_base + PCIE_ATU_UPPER_TARGET); 333 dbi_base + PCIE_ATU_UPPER_TARGET);
347} 334}
348 335
349static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 336static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
350 u32 devfn, int where, int size, u32 *val) 337 u32 devfn, int where, int size, u32 *val)
351{ 338{
352 int ret = PCIBIOS_SUCCESSFUL; 339 int ret = PCIBIOS_SUCCESSFUL;
@@ -357,19 +344,19 @@ static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
357 address = where & ~0x3; 344 address = where & ~0x3;
358 345
359 if (bus->parent->number == pp->root_bus_nr) { 346 if (bus->parent->number == pp->root_bus_nr) {
360 exynos_pcie_prog_viewport_cfg0(pp, busdev); 347 dw_pcie_prog_viewport_cfg0(pp, busdev);
361 ret = cfg_read(pp->va_cfg0_base + address, where, size, val); 348 ret = cfg_read(pp->va_cfg0_base + address, where, size, val);
362 exynos_pcie_prog_viewport_mem_outbound(pp); 349 dw_pcie_prog_viewport_mem_outbound(pp);
363 } else { 350 } else {
364 exynos_pcie_prog_viewport_cfg1(pp, busdev); 351 dw_pcie_prog_viewport_cfg1(pp, busdev);
365 ret = cfg_read(pp->va_cfg1_base + address, where, size, val); 352 ret = cfg_read(pp->va_cfg1_base + address, where, size, val);
366 exynos_pcie_prog_viewport_io_outbound(pp); 353 dw_pcie_prog_viewport_io_outbound(pp);
367 } 354 }
368 355
369 return ret; 356 return ret;
370} 357}
371 358
372static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, 359static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
373 u32 devfn, int where, int size, u32 val) 360 u32 devfn, int where, int size, u32 val)
374{ 361{
375 int ret = PCIBIOS_SUCCESSFUL; 362 int ret = PCIBIOS_SUCCESSFUL;
@@ -380,59 +367,25 @@ static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
380 address = where & ~0x3; 367 address = where & ~0x3;
381 368
382 if (bus->parent->number == pp->root_bus_nr) { 369 if (bus->parent->number == pp->root_bus_nr) {
383 exynos_pcie_prog_viewport_cfg0(pp, busdev); 370 dw_pcie_prog_viewport_cfg0(pp, busdev);
384 ret = cfg_write(pp->va_cfg0_base + address, where, size, val); 371 ret = cfg_write(pp->va_cfg0_base + address, where, size, val);
385 exynos_pcie_prog_viewport_mem_outbound(pp); 372 dw_pcie_prog_viewport_mem_outbound(pp);
386 } else { 373 } else {
387 exynos_pcie_prog_viewport_cfg1(pp, busdev); 374 dw_pcie_prog_viewport_cfg1(pp, busdev);
388 ret = cfg_write(pp->va_cfg1_base + address, where, size, val); 375 ret = cfg_write(pp->va_cfg1_base + address, where, size, val);
389 exynos_pcie_prog_viewport_io_outbound(pp); 376 dw_pcie_prog_viewport_io_outbound(pp);
390 } 377 }
391 378
392 return ret; 379 return ret;
393} 380}
394 381
395static unsigned long global_io_offset;
396 382
397static int exynos_pcie_setup(int nr, struct pci_sys_data *sys) 383static int dw_pcie_valid_config(struct pcie_port *pp,
398{
399 struct pcie_port *pp;
400
401 pp = sys_to_pcie(sys);
402
403 if (!pp)
404 return 0;
405
406 if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
407 sys->io_offset = global_io_offset - pp->config.io_bus_addr;
408 pci_ioremap_io(sys->io_offset, pp->io.start);
409 global_io_offset += SZ_64K;
410 pci_add_resource_offset(&sys->resources, &pp->io,
411 sys->io_offset);
412 }
413
414 sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
415 pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
416
417 return 1;
418}
419
420static int exynos_pcie_link_up(struct pcie_port *pp)
421{
422 u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP);
423
424 if (val == PCIE_ELBI_LTSSM_ENABLE)
425 return 1;
426
427 return 0;
428}
429
430static int exynos_pcie_valid_config(struct pcie_port *pp,
431 struct pci_bus *bus, int dev) 384 struct pci_bus *bus, int dev)
432{ 385{
433 /* If there is no link, then there is no device */ 386 /* If there is no link, then there is no device */
434 if (bus->number != pp->root_bus_nr) { 387 if (bus->number != pp->root_bus_nr) {
435 if (!exynos_pcie_link_up(pp)) 388 if (!dw_pcie_link_up(pp))
436 return 0; 389 return 0;
437 } 390 }
438 391
@@ -450,7 +403,7 @@ static int exynos_pcie_valid_config(struct pcie_port *pp,
450 return 1; 403 return 1;
451} 404}
452 405
453static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, 406static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
454 int size, u32 *val) 407 int size, u32 *val)
455{ 408{
456 struct pcie_port *pp = sys_to_pcie(bus->sysdata); 409 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -462,23 +415,23 @@ static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
462 return -EINVAL; 415 return -EINVAL;
463 } 416 }
464 417
465 if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { 418 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
466 *val = 0xffffffff; 419 *val = 0xffffffff;
467 return PCIBIOS_DEVICE_NOT_FOUND; 420 return PCIBIOS_DEVICE_NOT_FOUND;
468 } 421 }
469 422
470 spin_lock_irqsave(&pp->conf_lock, flags); 423 spin_lock_irqsave(&pp->conf_lock, flags);
471 if (bus->number != pp->root_bus_nr) 424 if (bus->number != pp->root_bus_nr)
472 ret = exynos_pcie_rd_other_conf(pp, bus, devfn, 425 ret = dw_pcie_rd_other_conf(pp, bus, devfn,
473 where, size, val); 426 where, size, val);
474 else 427 else
475 ret = exynos_pcie_rd_own_conf(pp, where, size, val); 428 ret = dw_pcie_rd_own_conf(pp, where, size, val);
476 spin_unlock_irqrestore(&pp->conf_lock, flags); 429 spin_unlock_irqrestore(&pp->conf_lock, flags);
477 430
478 return ret; 431 return ret;
479} 432}
480 433
481static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn, 434static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
482 int where, int size, u32 val) 435 int where, int size, u32 val)
483{ 436{
484 struct pcie_port *pp = sys_to_pcie(bus->sysdata); 437 struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -490,34 +443,56 @@ static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
490 return -EINVAL; 443 return -EINVAL;
491 } 444 }
492 445
493 if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) 446 if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
494 return PCIBIOS_DEVICE_NOT_FOUND; 447 return PCIBIOS_DEVICE_NOT_FOUND;
495 448
496 spin_lock_irqsave(&pp->conf_lock, flags); 449 spin_lock_irqsave(&pp->conf_lock, flags);
497 if (bus->number != pp->root_bus_nr) 450 if (bus->number != pp->root_bus_nr)
498 ret = exynos_pcie_wr_other_conf(pp, bus, devfn, 451 ret = dw_pcie_wr_other_conf(pp, bus, devfn,
499 where, size, val); 452 where, size, val);
500 else 453 else
501 ret = exynos_pcie_wr_own_conf(pp, where, size, val); 454 ret = dw_pcie_wr_own_conf(pp, where, size, val);
502 spin_unlock_irqrestore(&pp->conf_lock, flags); 455 spin_unlock_irqrestore(&pp->conf_lock, flags);
503 456
504 return ret; 457 return ret;
505} 458}
506 459
507static struct pci_ops exynos_pcie_ops = { 460static struct pci_ops dw_pcie_ops = {
508 .read = exynos_pcie_rd_conf, 461 .read = dw_pcie_rd_conf,
509 .write = exynos_pcie_wr_conf, 462 .write = dw_pcie_wr_conf,
510}; 463};
511 464
512static struct pci_bus *exynos_pcie_scan_bus(int nr, 465int dw_pcie_setup(int nr, struct pci_sys_data *sys)
513 struct pci_sys_data *sys) 466{
467 struct pcie_port *pp;
468
469 pp = sys_to_pcie(sys);
470
471 if (!pp)
472 return 0;
473
474 if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
475 sys->io_offset = global_io_offset - pp->config.io_bus_addr;
476 pci_ioremap_io(sys->io_offset, pp->io.start);
477 global_io_offset += SZ_64K;
478 pci_add_resource_offset(&sys->resources, &pp->io,
479 sys->io_offset);
480 }
481
482 sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
483 pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
484
485 return 1;
486}
487
488struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
514{ 489{
515 struct pci_bus *bus; 490 struct pci_bus *bus;
516 struct pcie_port *pp = sys_to_pcie(sys); 491 struct pcie_port *pp = sys_to_pcie(sys);
517 492
518 if (pp) { 493 if (pp) {
519 pp->root_bus_nr = sys->busnr; 494 pp->root_bus_nr = sys->busnr;
520 bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops, 495 bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops,
521 sys, &sys->resources); 496 sys, &sys->resources);
522 } else { 497 } else {
523 bus = NULL; 498 bus = NULL;
@@ -527,20 +502,20 @@ static struct pci_bus *exynos_pcie_scan_bus(int nr,
527 return bus; 502 return bus;
528} 503}
529 504
530static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 505int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
531{ 506{
532 struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); 507 struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
533 508
534 return pp->irq; 509 return pp->irq;
535} 510}
536 511
537static struct hw_pci exynos_pci = { 512static struct hw_pci dw_pci = {
538 .setup = exynos_pcie_setup, 513 .setup = dw_pcie_setup,
539 .scan = exynos_pcie_scan_bus, 514 .scan = dw_pcie_scan_bus,
540 .map_irq = exynos_pcie_map_irq, 515 .map_irq = dw_pcie_map_irq,
541}; 516};
542 517
543static void exynos_pcie_setup_rc(struct pcie_port *pp) 518void dw_pcie_setup_rc(struct pcie_port *pp)
544{ 519{
545 struct pcie_port_info *config = &pp->config; 520 struct pcie_port_info *config = &pp->config;
546 void __iomem *dbi_base = pp->dbi_base; 521 void __iomem *dbi_base = pp->dbi_base;
@@ -549,509 +524,67 @@ static void exynos_pcie_setup_rc(struct pcie_port *pp)
549 u32 memlimit; 524 u32 memlimit;
550 525
551 /* set the number of lines as 4 */ 526 /* set the number of lines as 4 */
552 readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val); 527 dw_pcie_readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val);
553 val &= ~PORT_LINK_MODE_MASK; 528 val &= ~PORT_LINK_MODE_MASK;
554 val |= PORT_LINK_MODE_4_LANES; 529 switch (pp->lanes) {
555 writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL); 530 case 1:
531 val |= PORT_LINK_MODE_1_LANES;
532 break;
533 case 2:
534 val |= PORT_LINK_MODE_2_LANES;
535 break;
536 case 4:
537 val |= PORT_LINK_MODE_4_LANES;
538 break;
539 }
540 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL);
556 541
557 /* set link width speed control register */ 542 /* set link width speed control register */
558 readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val); 543 dw_pcie_readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
559 val &= ~PORT_LOGIC_LINK_WIDTH_MASK; 544 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
560 val |= PORT_LOGIC_LINK_WIDTH_4_LANES; 545 switch (pp->lanes) {
561 writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); 546 case 1:
547 val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
548 break;
549 case 2:
550 val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
551 break;
552 case 4:
553 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
554 break;
555 }
556 dw_pcie_writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
562 557
563 /* setup RC BARs */ 558 /* setup RC BARs */
564 writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0); 559 dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0);
565 writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1); 560 dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1);
566 561
567 /* setup interrupt pins */ 562 /* setup interrupt pins */
568 readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val); 563 dw_pcie_readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val);
569 val &= 0xffff00ff; 564 val &= 0xffff00ff;
570 val |= 0x00000100; 565 val |= 0x00000100;
571 writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE); 566 dw_pcie_writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE);
572 567
573 /* setup bus numbers */ 568 /* setup bus numbers */
574 readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val); 569 dw_pcie_readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val);
575 val &= 0xff000000; 570 val &= 0xff000000;
576 val |= 0x00010100; 571 val |= 0x00010100;
577 writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS); 572 dw_pcie_writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS);
578 573
579 /* setup memory base, memory limit */ 574 /* setup memory base, memory limit */
580 membase = ((u32)pp->mem_base & 0xfff00000) >> 16; 575 membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
581 memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000; 576 memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
582 val = memlimit | membase; 577 val = memlimit | membase;
583 writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE); 578 dw_pcie_writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE);
584 579
585 /* setup command register */ 580 /* setup command register */
586 readl_rc(pp, dbi_base + PCI_COMMAND, &val); 581 dw_pcie_readl_rc(pp, dbi_base + PCI_COMMAND, &val);
587 val &= 0xffff0000; 582 val &= 0xffff0000;
588 val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 583 val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
589 PCI_COMMAND_MASTER | PCI_COMMAND_SERR; 584 PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
590 writel_rc(pp, val, dbi_base + PCI_COMMAND); 585 dw_pcie_writel_rc(pp, val, dbi_base + PCI_COMMAND);
591}
592
593static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
594{
595 u32 val;
596 void __iomem *elbi_base = pp->elbi_base;
597
598 val = readl(elbi_base + PCIE_CORE_RESET);
599 val &= ~PCIE_CORE_RESET_ENABLE;
600 writel(val, elbi_base + PCIE_CORE_RESET);
601 writel(0, elbi_base + PCIE_PWR_RESET);
602 writel(0, elbi_base + PCIE_STICKY_RESET);
603 writel(0, elbi_base + PCIE_NONSTICKY_RESET);
604}
605
606static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
607{
608 u32 val;
609 void __iomem *elbi_base = pp->elbi_base;
610 void __iomem *purple_base = pp->purple_base;
611
612 val = readl(elbi_base + PCIE_CORE_RESET);
613 val |= PCIE_CORE_RESET_ENABLE;
614 writel(val, elbi_base + PCIE_CORE_RESET);
615 writel(1, elbi_base + PCIE_STICKY_RESET);
616 writel(1, elbi_base + PCIE_NONSTICKY_RESET);
617 writel(1, elbi_base + PCIE_APP_INIT_RESET);
618 writel(0, elbi_base + PCIE_APP_INIT_RESET);
619 writel(1, purple_base + PCIE_PHY_MAC_RESET);
620}
621
622static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
623{
624 void __iomem *purple_base = pp->purple_base;
625
626 writel(0, purple_base + PCIE_PHY_MAC_RESET);
627 writel(1, purple_base + PCIE_PHY_GLOBAL_RESET);
628}
629
630static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
631{
632 void __iomem *elbi_base = pp->elbi_base;
633 void __iomem *purple_base = pp->purple_base;
634
635 writel(0, purple_base + PCIE_PHY_GLOBAL_RESET);
636 writel(1, elbi_base + PCIE_PWR_RESET);
637 writel(0, purple_base + PCIE_PHY_COMMON_RESET);
638 writel(0, purple_base + PCIE_PHY_CMN_REG);
639 writel(0, purple_base + PCIE_PHY_TRSVREG_RESET);
640 writel(0, purple_base + PCIE_PHY_TRSV_RESET);
641}
642
643static void exynos_pcie_init_phy(struct pcie_port *pp)
644{
645 void __iomem *phy_base = pp->phy_base;
646
647 /* DCC feedback control off */
648 writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK);
649
650 /* set TX/RX impedance */
651 writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE);
652
653 /* set 50Mhz PHY clock */
654 writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0);
655 writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1);
656
657 /* set TX Differential output for lane 0 */
658 writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL);
659
660 /* set TX Pre-emphasis Level Control for lane 0 to minimum */
661 writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
662
663 /* set RX clock and data recovery bandwidth */
664 writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS);
665 writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR);
666 writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR);
667 writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR);
668 writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR);
669
670 /* change TX Pre-emphasis Level Control for lanes */
671 writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
672 writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL);
673 writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL);
674 writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL);
675
676 /* set LVCC */
677 writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC);
678 writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC);
679 writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC);
680 writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC);
681}
682
683static void exynos_pcie_assert_reset(struct pcie_port *pp)
684{
685 if (pp->reset_gpio >= 0)
686 devm_gpio_request_one(pp->dev, pp->reset_gpio,
687 GPIOF_OUT_INIT_HIGH, "RESET");
688 return;
689}
690
691static int exynos_pcie_establish_link(struct pcie_port *pp)
692{
693 u32 val;
694 int count = 0;
695 void __iomem *elbi_base = pp->elbi_base;
696 void __iomem *purple_base = pp->purple_base;
697 void __iomem *phy_base = pp->phy_base;
698
699 if (exynos_pcie_link_up(pp)) {
700 dev_err(pp->dev, "Link already up\n");
701 return 0;
702 }
703
704 /* assert reset signals */
705 exynos_pcie_assert_core_reset(pp);
706 exynos_pcie_assert_phy_reset(pp);
707
708 /* de-assert phy reset */
709 exynos_pcie_deassert_phy_reset(pp);
710
711 /* initialize phy */
712 exynos_pcie_init_phy(pp);
713
714 /* pulse for common reset */
715 writel(1, purple_base + PCIE_PHY_COMMON_RESET);
716 udelay(500);
717 writel(0, purple_base + PCIE_PHY_COMMON_RESET);
718
719 /* de-assert core reset */
720 exynos_pcie_deassert_core_reset(pp);
721
722 /* setup root complex */
723 exynos_pcie_setup_rc(pp);
724
725 /* assert reset signal */
726 exynos_pcie_assert_reset(pp);
727
728 /* assert LTSSM enable */
729 writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE);
730
731 /* check if the link is up or not */
732 while (!exynos_pcie_link_up(pp)) {
733 mdelay(100);
734 count++;
735 if (count == 10) {
736 while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) {
737 val = readl(purple_base + PCIE_PHY_PLL_LOCKED);
738 dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
739 }
740 dev_err(pp->dev, "PCIe Link Fail\n");
741 return -EINVAL;
742 }
743 }
744
745 dev_info(pp->dev, "Link up\n");
746
747 return 0;
748}
749
750static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
751{
752 u32 val;
753 void __iomem *elbi_base = pp->elbi_base;
754
755 val = readl(elbi_base + PCIE_IRQ_PULSE);
756 writel(val, elbi_base + PCIE_IRQ_PULSE);
757 return;
758}
759
760static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
761{
762 u32 val;
763 void __iomem *elbi_base = pp->elbi_base;
764
765 /* enable INTX interrupt */
766 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
767 IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
768 writel(val, elbi_base + PCIE_IRQ_EN_PULSE);
769 return;
770}
771
772static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
773{
774 struct pcie_port *pp = arg;
775
776 exynos_pcie_clear_irq_pulse(pp);
777 return IRQ_HANDLED;
778}
779
780static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
781{
782 exynos_pcie_enable_irq_pulse(pp);
783 return;
784}
785
786static void exynos_pcie_host_init(struct pcie_port *pp)
787{
788 struct pcie_port_info *config = &pp->config;
789 u32 val;
790
791 /* Keep first 64K for IO */
792 pp->cfg0_base = pp->cfg.start;
793 pp->cfg1_base = pp->cfg.start + config->cfg0_size;
794 pp->io_base = pp->io.start;
795 pp->mem_base = pp->mem.start;
796
797 /* enable link */
798 exynos_pcie_establish_link(pp);
799
800 exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
801
802 /* program correct class for RC */
803 exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
804
805 exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
806 val |= PORT_LOGIC_SPEED_CHANGE;
807 exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
808
809 exynos_pcie_enable_interrupts(pp);
810}
811
812static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
813{
814 struct resource *elbi_base;
815 struct resource *phy_base;
816 struct resource *purple_base;
817 int ret;
818
819 elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
820 if (!elbi_base) {
821 dev_err(&pdev->dev, "couldn't get elbi base resource\n");
822 return -EINVAL;
823 }
824 pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
825 if (IS_ERR(pp->elbi_base))
826 return PTR_ERR(pp->elbi_base);
827
828 phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
829 if (!phy_base) {
830 dev_err(&pdev->dev, "couldn't get phy base resource\n");
831 return -EINVAL;
832 }
833 pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
834 if (IS_ERR(pp->phy_base))
835 return PTR_ERR(pp->phy_base);
836
837 purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
838 if (!purple_base) {
839 dev_err(&pdev->dev, "couldn't get purple base resource\n");
840 return -EINVAL;
841 }
842 pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base);
843 if (IS_ERR(pp->purple_base))
844 return PTR_ERR(pp->purple_base);
845
846 pp->irq = platform_get_irq(pdev, 1);
847 if (!pp->irq) {
848 dev_err(&pdev->dev, "failed to get irq\n");
849 return -ENODEV;
850 }
851 ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
852 IRQF_SHARED, "exynos-pcie", pp);
853 if (ret) {
854 dev_err(&pdev->dev, "failed to request irq\n");
855 return ret;
856 }
857
858 pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start,
859 resource_size(&pp->cfg));
860 if (!pp->dbi_base) {
861 dev_err(&pdev->dev, "error with ioremap\n");
862 return -ENOMEM;
863 }
864
865 pp->root_bus_nr = -1;
866
867 spin_lock_init(&pp->conf_lock);
868 exynos_pcie_host_init(pp);
869 pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base,
870 pp->config.cfg0_size);
871 if (!pp->va_cfg0_base) {
872 dev_err(pp->dev, "error with ioremap in function\n");
873 return -ENOMEM;
874 }
875 pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base,
876 pp->config.cfg1_size);
877 if (!pp->va_cfg1_base) {
878 dev_err(pp->dev, "error with ioremap\n");
879 return -ENOMEM;
880 }
881
882 return 0;
883}
884
885static int __init exynos_pcie_probe(struct platform_device *pdev)
886{
887 struct pcie_port *pp;
888 struct device_node *np = pdev->dev.of_node;
889 struct of_pci_range range;
890 struct of_pci_range_parser parser;
891 int ret;
892
893 pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
894 if (!pp) {
895 dev_err(&pdev->dev, "no memory for pcie port\n");
896 return -ENOMEM;
897 }
898
899 pp->dev = &pdev->dev;
900
901 if (of_pci_range_parser_init(&parser, np)) {
902 dev_err(&pdev->dev, "missing ranges property\n");
903 return -EINVAL;
904 }
905
906 /* Get the I/O and memory ranges from DT */
907 for_each_of_pci_range(&parser, &range) {
908 unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
909 if (restype == IORESOURCE_IO) {
910 of_pci_range_to_resource(&range, np, &pp->io);
911 pp->io.name = "I/O";
912 pp->io.start = max_t(resource_size_t,
913 PCIBIOS_MIN_IO,
914 range.pci_addr + global_io_offset);
915 pp->io.end = min_t(resource_size_t,
916 IO_SPACE_LIMIT,
917 range.pci_addr + range.size
918 + global_io_offset);
919 pp->config.io_size = resource_size(&pp->io);
920 pp->config.io_bus_addr = range.pci_addr;
921 }
922 if (restype == IORESOURCE_MEM) {
923 of_pci_range_to_resource(&range, np, &pp->mem);
924 pp->mem.name = "MEM";
925 pp->config.mem_size = resource_size(&pp->mem);
926 pp->config.mem_bus_addr = range.pci_addr;
927 }
928 if (restype == 0) {
929 of_pci_range_to_resource(&range, np, &pp->cfg);
930 pp->config.cfg0_size = resource_size(&pp->cfg)/2;
931 pp->config.cfg1_size = resource_size(&pp->cfg)/2;
932 }
933 }
934
935 pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
936
937 pp->clk = devm_clk_get(&pdev->dev, "pcie");
938 if (IS_ERR(pp->clk)) {
939 dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
940 return PTR_ERR(pp->clk);
941 }
942 ret = clk_prepare_enable(pp->clk);
943 if (ret)
944 return ret;
945
946 pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
947 if (IS_ERR(pp->bus_clk)) {
948 dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
949 ret = PTR_ERR(pp->bus_clk);
950 goto fail_clk;
951 }
952 ret = clk_prepare_enable(pp->bus_clk);
953 if (ret)
954 goto fail_clk;
955
956 ret = add_pcie_port(pp, pdev);
957 if (ret < 0)
958 goto fail_bus_clk;
959
960 pp->controller = exynos_pci.nr_controllers;
961 exynos_pci.nr_controllers = 1;
962 exynos_pci.private_data = (void **)&pp;
963
964 pci_common_init(&exynos_pci);
965 pci_assign_unassigned_resources();
966#ifdef CONFIG_PCI_DOMAINS
967 exynos_pci.domain++;
968#endif
969
970 platform_set_drvdata(pdev, pp);
971 return 0;
972
973fail_bus_clk:
974 clk_disable_unprepare(pp->bus_clk);
975fail_clk:
976 clk_disable_unprepare(pp->clk);
977 return ret;
978}
979
980static int __exit exynos_pcie_remove(struct platform_device *pdev)
981{
982 struct pcie_port *pp = platform_get_drvdata(pdev);
983
984 clk_disable_unprepare(pp->bus_clk);
985 clk_disable_unprepare(pp->clk);
986
987 return 0;
988}
989
990static const struct of_device_id exynos_pcie_of_match[] = {
991 { .compatible = "samsung,exynos5440-pcie", },
992 {},
993};
994MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
995
996static struct platform_driver exynos_pcie_driver = {
997 .remove = __exit_p(exynos_pcie_remove),
998 .driver = {
999 .name = "exynos-pcie",
1000 .owner = THIS_MODULE,
1001 .of_match_table = of_match_ptr(exynos_pcie_of_match),
1002 },
1003};
1004
1005static int exynos_pcie_abort(unsigned long addr, unsigned int fsr,
1006 struct pt_regs *regs)
1007{
1008 unsigned long pc = instruction_pointer(regs);
1009 unsigned long instr = *(unsigned long *)pc;
1010
1011 WARN_ONCE(1, "pcie abort\n");
1012
1013 /*
1014 * If the instruction being executed was a read,
1015 * make it look like it read all-ones.
1016 */
1017 if ((instr & 0x0c100000) == 0x04100000) {
1018 int reg = (instr >> 12) & 15;
1019 unsigned long val;
1020
1021 if (instr & 0x00400000)
1022 val = 255;
1023 else
1024 val = -1;
1025
1026 regs->uregs[reg] = val;
1027 regs->ARM_pc += 4;
1028 return 0;
1029 }
1030
1031 if ((instr & 0x0e100090) == 0x00100090) {
1032 int reg = (instr >> 12) & 15;
1033
1034 regs->uregs[reg] = -1;
1035 regs->ARM_pc += 4;
1036 return 0;
1037 }
1038
1039 return 1;
1040}
1041
1042/* Exynos PCIe driver does not allow module unload */
1043
1044static int __init pcie_init(void)
1045{
1046 hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0,
1047 "imprecise external abort");
1048
1049 platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
1050
1051 return 0;
1052} 586}
1053subsys_initcall(pcie_init);
1054 587
1055MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); 588MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
1056MODULE_DESCRIPTION("Samsung PCIe host controller driver"); 589MODULE_DESCRIPTION("Designware PCIe host controller driver");
1057MODULE_LICENSE("GPL v2"); 590MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
new file mode 100644
index 000000000000..133820f1da97
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.h
@@ -0,0 +1,65 @@
1/*
2 * Synopsys Designware PCIe host controller driver
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Jingoo Han <jg1.han@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14struct pcie_port_info {
15 u32 cfg0_size;
16 u32 cfg1_size;
17 u32 io_size;
18 u32 mem_size;
19 phys_addr_t io_bus_addr;
20 phys_addr_t mem_bus_addr;
21};
22
23struct pcie_port {
24 struct device *dev;
25 u8 root_bus_nr;
26 void __iomem *dbi_base;
27 u64 cfg0_base;
28 void __iomem *va_cfg0_base;
29 u64 cfg1_base;
30 void __iomem *va_cfg1_base;
31 u64 io_base;
32 u64 mem_base;
33 spinlock_t conf_lock;
34 struct resource cfg;
35 struct resource io;
36 struct resource mem;
37 struct pcie_port_info config;
38 int irq;
39 u32 lanes;
40 struct pcie_host_ops *ops;
41};
42
43struct pcie_host_ops {
44 void (*readl_rc)(struct pcie_port *pp,
45 void __iomem *dbi_base, u32 *val);
46 void (*writel_rc)(struct pcie_port *pp,
47 u32 val, void __iomem *dbi_base);
48 int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
49 int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
50 int (*link_up)(struct pcie_port *pp);
51 void (*host_init)(struct pcie_port *pp);
52};
53
54extern unsigned long global_io_offset;
55
56int cfg_read(void __iomem *addr, int where, int size, u32 *val);
57int cfg_write(void __iomem *addr, int where, int size, u32 val);
58int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
59int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
60int dw_pcie_link_up(struct pcie_port *pp);
61void dw_pcie_setup_rc(struct pcie_port *pp);
62int dw_pcie_host_init(struct pcie_port *pp);
63int dw_pcie_setup(int nr, struct pci_sys_data *sys);
64struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
65int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 7fb326983ed6..541bbe6d5343 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -155,6 +155,7 @@ void pciehp_green_led_off(struct slot *slot);
155void pciehp_green_led_blink(struct slot *slot); 155void pciehp_green_led_blink(struct slot *slot);
156int pciehp_check_link_status(struct controller *ctrl); 156int pciehp_check_link_status(struct controller *ctrl);
157void pciehp_release_ctrl(struct controller *ctrl); 157void pciehp_release_ctrl(struct controller *ctrl);
158int pciehp_reset_slot(struct slot *slot, int probe);
158 159
159static inline const char *slot_name(struct slot *slot) 160static inline const char *slot_name(struct slot *slot)
160{ 161{
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d72c5e2eba9..f4a18f51a29c 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -69,6 +69,7 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
69static int get_attention_status (struct hotplug_slot *slot, u8 *value); 69static int get_attention_status (struct hotplug_slot *slot, u8 *value);
70static int get_latch_status (struct hotplug_slot *slot, u8 *value); 70static int get_latch_status (struct hotplug_slot *slot, u8 *value);
71static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 71static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
72static int reset_slot (struct hotplug_slot *slot, int probe);
72 73
73/** 74/**
74 * release_slot - free up the memory used by a slot 75 * release_slot - free up the memory used by a slot
@@ -111,6 +112,7 @@ static int init_slot(struct controller *ctrl)
111 ops->disable_slot = disable_slot; 112 ops->disable_slot = disable_slot;
112 ops->get_power_status = get_power_status; 113 ops->get_power_status = get_power_status;
113 ops->get_adapter_status = get_adapter_status; 114 ops->get_adapter_status = get_adapter_status;
115 ops->reset_slot = reset_slot;
114 if (MRL_SENS(ctrl)) 116 if (MRL_SENS(ctrl))
115 ops->get_latch_status = get_latch_status; 117 ops->get_latch_status = get_latch_status;
116 if (ATTN_LED(ctrl)) { 118 if (ATTN_LED(ctrl)) {
@@ -223,6 +225,16 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
223 return pciehp_get_adapter_status(slot, value); 225 return pciehp_get_adapter_status(slot, value);
224} 226}
225 227
228static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
229{
230 struct slot *slot = hotplug_slot->private;
231
232 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
233 __func__, slot_name(slot));
234
235 return pciehp_reset_slot(slot, probe);
236}
237
226static int pciehp_probe(struct pcie_device *dev) 238static int pciehp_probe(struct pcie_device *dev)
227{ 239{
228 int rc; 240 int rc;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b2255736ac81..51f56ef4ab6f 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -749,6 +749,37 @@ static void pcie_disable_notification(struct controller *ctrl)
749 ctrl_warn(ctrl, "Cannot disable software notification\n"); 749 ctrl_warn(ctrl, "Cannot disable software notification\n");
750} 750}
751 751
752/*
753 * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
754 * bus reset of the bridge, but if the slot supports surprise removal we need
755 * to disable presence detection around the bus reset and clear any spurious
756 * events after.
757 */
758int pciehp_reset_slot(struct slot *slot, int probe)
759{
760 struct controller *ctrl = slot->ctrl;
761
762 if (probe)
763 return 0;
764
765 if (HP_SUPR_RM(ctrl)) {
766 pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE);
767 if (pciehp_poll_mode)
768 del_timer_sync(&ctrl->poll_timer);
769 }
770
771 pci_reset_bridge_secondary_bus(ctrl->pcie->port);
772
773 if (HP_SUPR_RM(ctrl)) {
774 pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC);
775 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
776 if (pciehp_poll_mode)
777 int_poll_timeout(ctrl->poll_timer.data);
778 }
779
780 return 0;
781}
782
752int pcie_init_notification(struct controller *ctrl) 783int pcie_init_notification(struct controller *ctrl)
753{ 784{
754 if (pciehp_request_irq(ctrl)) 785 if (pciehp_request_irq(ctrl))
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index de8ffacf9c9b..21a7182dccd4 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -286,7 +286,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
286 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial))) 286 (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
287 return -EINVAL; 287 return -EINVAL;
288 288
289 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
290 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset); 289 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
291 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride); 290 pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
292 if (!offset || (nr_virtfn > 1 && !stride)) 291 if (!offset || (nr_virtfn > 1 && !stride))
@@ -324,7 +323,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
324 323
325 if (!pdev->is_physfn) { 324 if (!pdev->is_physfn) {
326 pci_dev_put(pdev); 325 pci_dev_put(pdev);
327 return -ENODEV; 326 return -ENOSYS;
328 } 327 }
329 328
330 rc = sysfs_create_link(&dev->dev.kobj, 329 rc = sysfs_create_link(&dev->dev.kobj,
@@ -334,6 +333,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
334 return rc; 333 return rc;
335 } 334 }
336 335
336 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
337 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; 337 iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
338 pci_cfg_access_lock(dev); 338 pci_cfg_access_lock(dev);
339 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); 339 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -368,6 +368,7 @@ failed:
368 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); 368 iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
369 pci_cfg_access_lock(dev); 369 pci_cfg_access_lock(dev);
370 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); 370 pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
371 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
371 ssleep(1); 372 ssleep(1);
372 pci_cfg_access_unlock(dev); 373 pci_cfg_access_unlock(dev);
373 374
@@ -401,6 +402,7 @@ static void sriov_disable(struct pci_dev *dev)
401 sysfs_remove_link(&dev->dev.kobj, "dep_link"); 402 sysfs_remove_link(&dev->dev.kobj, "dep_link");
402 403
403 iov->num_VFs = 0; 404 iov->num_VFs = 0;
405 pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
404} 406}
405 407
406static int sriov_init(struct pci_dev *dev, int pos) 408static int sriov_init(struct pci_dev *dev, int pos)
@@ -662,7 +664,7 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
662 might_sleep(); 664 might_sleep();
663 665
664 if (!dev->is_physfn) 666 if (!dev->is_physfn)
665 return -ENODEV; 667 return -ENOSYS;
666 668
667 return sriov_enable(dev, nr_virtfn); 669 return sriov_enable(dev, nr_virtfn);
668} 670}
@@ -722,7 +724,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
722 * @dev: the PCI device 724 * @dev: the PCI device
723 * 725 *
724 * Returns number of VFs belonging to this device that are assigned to a guest. 726 * Returns number of VFs belonging to this device that are assigned to a guest.
725 * If device is not a physical function returns -ENODEV. 727 * If device is not a physical function returns 0.
726 */ 728 */
727int pci_vfs_assigned(struct pci_dev *dev) 729int pci_vfs_assigned(struct pci_dev *dev)
728{ 730{
@@ -767,12 +769,15 @@ EXPORT_SYMBOL_GPL(pci_vfs_assigned);
767 * device's mutex held. 769 * device's mutex held.
768 * 770 *
769 * Returns 0 if PF is an SRIOV-capable device and 771 * Returns 0 if PF is an SRIOV-capable device and
770 * value of numvfs valid. If not a PF with VFS, return -EINVAL; 772 * value of numvfs valid. If not a PF return -ENOSYS;
773 * if numvfs is invalid return -EINVAL;
771 * if VFs already enabled, return -EBUSY. 774 * if VFs already enabled, return -EBUSY.
772 */ 775 */
773int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) 776int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
774{ 777{
775 if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs)) 778 if (!dev->is_physfn)
779 return -ENOSYS;
780 if (numvfs > dev->sriov->total_VFs)
776 return -EINVAL; 781 return -EINVAL;
777 782
778 /* Shouldn't change if VFs already enabled */ 783 /* Shouldn't change if VFs already enabled */
@@ -786,17 +791,17 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
786EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs); 791EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
787 792
788/** 793/**
789 * pci_sriov_get_totalvfs -- get total VFs supported on this devic3 794 * pci_sriov_get_totalvfs -- get total VFs supported on this device
790 * @dev: the PCI PF device 795 * @dev: the PCI PF device
791 * 796 *
792 * For a PCIe device with SRIOV support, return the PCIe 797 * For a PCIe device with SRIOV support, return the PCIe
793 * SRIOV capability value of TotalVFs or the value of driver_max_VFs 798 * SRIOV capability value of TotalVFs or the value of driver_max_VFs
794 * if the driver reduced it. Otherwise, -EINVAL. 799 * if the driver reduced it. Otherwise 0.
795 */ 800 */
796int pci_sriov_get_totalvfs(struct pci_dev *dev) 801int pci_sriov_get_totalvfs(struct pci_dev *dev)
797{ 802{
798 if (!dev->is_physfn) 803 if (!dev->is_physfn)
799 return -EINVAL; 804 return 0;
800 805
801 if (dev->sriov->driver_max_VFs) 806 if (dev->sriov->driver_max_VFs)
802 return dev->sriov->driver_max_VFs; 807 return dev->sriov->driver_max_VFs;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c0dbe1f61362..7128cfdd64aa 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -131,19 +131,19 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
131 return ret; 131 return ret;
132} 132}
133 133
134static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev, 134static ssize_t cpuaffinity_show(struct device *dev,
135 struct device_attribute *attr, 135 struct device_attribute *attr, char *buf)
136 char *buf)
137{ 136{
138 return pci_bus_show_cpuaffinity(dev, 0, attr, buf); 137 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
139} 138}
139static DEVICE_ATTR_RO(cpuaffinity);
140 140
141static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev, 141static ssize_t cpulistaffinity_show(struct device *dev,
142 struct device_attribute *attr, 142 struct device_attribute *attr, char *buf)
143 char *buf)
144{ 143{
145 return pci_bus_show_cpuaffinity(dev, 1, attr, buf); 144 return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
146} 145}
146static DEVICE_ATTR_RO(cpulistaffinity);
147 147
148/* show resources */ 148/* show resources */
149static ssize_t 149static ssize_t
@@ -379,6 +379,7 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
379 } 379 }
380 return count; 380 return count;
381} 381}
382static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
382 383
383#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) 384#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
384static ssize_t d3cold_allowed_store(struct device *dev, 385static ssize_t d3cold_allowed_store(struct device *dev,
@@ -514,11 +515,20 @@ struct device_attribute pci_dev_attrs[] = {
514 __ATTR_NULL, 515 __ATTR_NULL,
515}; 516};
516 517
517struct device_attribute pcibus_dev_attrs[] = { 518static struct attribute *pcibus_attrs[] = {
518 __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store), 519 &dev_attr_rescan.attr,
519 __ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL), 520 &dev_attr_cpuaffinity.attr,
520 __ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL), 521 &dev_attr_cpulistaffinity.attr,
521 __ATTR_NULL, 522 NULL,
523};
524
525static const struct attribute_group pcibus_group = {
526 .attrs = pcibus_attrs,
527};
528
529const struct attribute_group *pcibus_groups[] = {
530 &pcibus_group,
531 NULL,
522}; 532};
523 533
524static ssize_t 534static ssize_t
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 44a1a8a0ad7b..d3fdce8f3d65 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25#include <linux/pci_hotplug.h>
25#include <asm-generic/pci-bridge.h> 26#include <asm-generic/pci-bridge.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include "pci.h" 28#include "pci.h"
@@ -2012,7 +2013,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
2012} 2013}
2013 2014
2014/** 2015/**
2015 * pci_add_save_buffer - allocate buffer for saving given capability registers 2016 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
2016 * @dev: the PCI device 2017 * @dev: the PCI device
2017 * @cap: the capability to allocate the buffer for 2018 * @cap: the capability to allocate the buffer for
2018 * @size: requested size of the buffer 2019 * @size: requested size of the buffer
@@ -2379,6 +2380,27 @@ void pci_enable_acs(struct pci_dev *dev)
2379 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); 2380 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2380} 2381}
2381 2382
2383static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2384{
2385 int pos;
2386 u16 cap, ctrl;
2387
2388 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2389 if (!pos)
2390 return false;
2391
2392 /*
2393 * Except for egress control, capabilities are either required
2394 * or only required if controllable. Features missing from the
2395 * capability field can therefore be assumed as hard-wired enabled.
2396 */
2397 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2398 acs_flags &= (cap | PCI_ACS_EC);
2399
2400 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2401 return (ctrl & acs_flags) == acs_flags;
2402}
2403
2382/** 2404/**
2383 * pci_acs_enabled - test ACS against required flags for a given device 2405 * pci_acs_enabled - test ACS against required flags for a given device
2384 * @pdev: device to test 2406 * @pdev: device to test
@@ -2386,36 +2408,76 @@ void pci_enable_acs(struct pci_dev *dev)
2386 * 2408 *
2387 * Return true if the device supports the provided flags. Automatically 2409 * Return true if the device supports the provided flags. Automatically
2388 * filters out flags that are not implemented on multifunction devices. 2410 * filters out flags that are not implemented on multifunction devices.
2411 *
2412 * Note that this interface checks the effective ACS capabilities of the
2413 * device rather than the actual capabilities. For instance, most single
2414 * function endpoints are not required to support ACS because they have no
2415 * opportunity for peer-to-peer access. We therefore return 'true'
2416 * regardless of whether the device exposes an ACS capability. This makes
2417 * it much easier for callers of this function to ignore the actual type
2418 * or topology of the device when testing ACS support.
2389 */ 2419 */
2390bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 2420bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2391{ 2421{
2392 int pos, ret; 2422 int ret;
2393 u16 ctrl;
2394 2423
2395 ret = pci_dev_specific_acs_enabled(pdev, acs_flags); 2424 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2396 if (ret >= 0) 2425 if (ret >= 0)
2397 return ret > 0; 2426 return ret > 0;
2398 2427
2428 /*
2429 * Conventional PCI and PCI-X devices never support ACS, either
2430 * effectively or actually. The shared bus topology implies that
2431 * any device on the bus can receive or snoop DMA.
2432 */
2399 if (!pci_is_pcie(pdev)) 2433 if (!pci_is_pcie(pdev))
2400 return false; 2434 return false;
2401 2435
2402 /* Filter out flags not applicable to multifunction */ 2436 switch (pci_pcie_type(pdev)) {
2403 if (pdev->multifunction) 2437 /*
2404 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | 2438 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2405 PCI_ACS_EC | PCI_ACS_DT); 2439 * but since their primary inteface is PCI/X, we conservatively
2406 2440 * handle them as we would a non-PCIe device.
2407 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM || 2441 */
2408 pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || 2442 case PCI_EXP_TYPE_PCIE_BRIDGE:
2409 pdev->multifunction) { 2443 /*
2410 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); 2444 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2411 if (!pos) 2445 * applicable... must never implement an ACS Extended Capability...".
2412 return false; 2446 * This seems arbitrary, but we take a conservative interpretation
2447 * of this statement.
2448 */
2449 case PCI_EXP_TYPE_PCI_BRIDGE:
2450 case PCI_EXP_TYPE_RC_EC:
2451 return false;
2452 /*
2453 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2454 * implement ACS in order to indicate their peer-to-peer capabilities,
2455 * regardless of whether they are single- or multi-function devices.
2456 */
2457 case PCI_EXP_TYPE_DOWNSTREAM:
2458 case PCI_EXP_TYPE_ROOT_PORT:
2459 return pci_acs_flags_enabled(pdev, acs_flags);
2460 /*
2461 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2462 * implemented by the remaining PCIe types to indicate peer-to-peer
2463 * capabilities, but only when they are part of a multifunciton
2464 * device. The footnote for section 6.12 indicates the specific
2465 * PCIe types included here.
2466 */
2467 case PCI_EXP_TYPE_ENDPOINT:
2468 case PCI_EXP_TYPE_UPSTREAM:
2469 case PCI_EXP_TYPE_LEG_END:
2470 case PCI_EXP_TYPE_RC_END:
2471 if (!pdev->multifunction)
2472 break;
2413 2473
2414 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); 2474 return pci_acs_flags_enabled(pdev, acs_flags);
2415 if ((ctrl & acs_flags) != acs_flags)
2416 return false;
2417 } 2475 }
2418 2476
2477 /*
2478 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilties are applicable
2479 * to single function devices with the exception of downstream ports.
2480 */
2419 return true; 2481 return true;
2420} 2482}
2421 2483
@@ -3118,19 +3180,17 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3118} 3180}
3119EXPORT_SYMBOL(pci_set_dma_seg_boundary); 3181EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3120 3182
3121static int pcie_flr(struct pci_dev *dev, int probe) 3183/**
3184 * pci_wait_for_pending_transaction - waits for pending transaction
3185 * @dev: the PCI device to operate on
3186 *
3187 * Return 0 if transaction is pending 1 otherwise.
3188 */
3189int pci_wait_for_pending_transaction(struct pci_dev *dev)
3122{ 3190{
3123 int i; 3191 int i;
3124 u32 cap;
3125 u16 status; 3192 u16 status;
3126 3193
3127 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3128 if (!(cap & PCI_EXP_DEVCAP_FLR))
3129 return -ENOTTY;
3130
3131 if (probe)
3132 return 0;
3133
3134 /* Wait for Transaction Pending bit clean */ 3194 /* Wait for Transaction Pending bit clean */
3135 for (i = 0; i < 4; i++) { 3195 for (i = 0; i < 4; i++) {
3136 if (i) 3196 if (i)
@@ -3138,13 +3198,27 @@ static int pcie_flr(struct pci_dev *dev, int probe)
3138 3198
3139 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 3199 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3140 if (!(status & PCI_EXP_DEVSTA_TRPND)) 3200 if (!(status & PCI_EXP_DEVSTA_TRPND))
3141 goto clear; 3201 return 1;
3142 } 3202 }
3143 3203
3144 dev_err(&dev->dev, "transaction is not cleared; " 3204 return 0;
3145 "proceeding with reset anyway\n"); 3205}
3206EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3207
3208static int pcie_flr(struct pci_dev *dev, int probe)
3209{
3210 u32 cap;
3211
3212 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3213 if (!(cap & PCI_EXP_DEVCAP_FLR))
3214 return -ENOTTY;
3215
3216 if (probe)
3217 return 0;
3218
3219 if (!pci_wait_for_pending_transaction(dev))
3220 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3146 3221
3147clear:
3148 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3222 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3149 3223
3150 msleep(100); 3224 msleep(100);
@@ -3235,9 +3309,42 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
3235 return 0; 3309 return 0;
3236} 3310}
3237 3311
3238static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 3312/**
3313 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3314 * @dev: Bridge device
3315 *
3316 * Use the bridge control register to assert reset on the secondary bus.
3317 * Devices on the secondary bus are left in power-on state.
3318 */
3319void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3239{ 3320{
3240 u16 ctrl; 3321 u16 ctrl;
3322
3323 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3324 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3325 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3326 /*
3327 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
3328 * this to 2ms to ensure that we meet the minium requirement.
3329 */
3330 msleep(2);
3331
3332 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3333 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3334
3335 /*
3336 * Trhfa for conventional PCI is 2^25 clock cycles.
3337 * Assuming a minimum 33MHz clock this results in a 1s
3338 * delay before we can consider subordinate devices to
3339 * be re-initialized. PCIe has some ways to shorten this,
3340 * but we don't make use of them yet.
3341 */
3342 ssleep(1);
3343}
3344EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3345
3346static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3347{
3241 struct pci_dev *pdev; 3348 struct pci_dev *pdev;
3242 3349
3243 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) 3350 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
@@ -3250,18 +3357,40 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3250 if (probe) 3357 if (probe)
3251 return 0; 3358 return 0;
3252 3359
3253 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl); 3360 pci_reset_bridge_secondary_bus(dev->bus->self);
3254 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3255 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3256 msleep(100);
3257
3258 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3259 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3260 msleep(100);
3261 3361
3262 return 0; 3362 return 0;
3263} 3363}
3264 3364
3365static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3366{
3367 int rc = -ENOTTY;
3368
3369 if (!hotplug || !try_module_get(hotplug->ops->owner))
3370 return rc;
3371
3372 if (hotplug->ops->reset_slot)
3373 rc = hotplug->ops->reset_slot(hotplug, probe);
3374
3375 module_put(hotplug->ops->owner);
3376
3377 return rc;
3378}
3379
3380static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3381{
3382 struct pci_dev *pdev;
3383
3384 if (dev->subordinate || !dev->slot)
3385 return -ENOTTY;
3386
3387 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3388 if (pdev != dev && pdev->slot == dev->slot)
3389 return -ENOTTY;
3390
3391 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3392}
3393
3265static int __pci_dev_reset(struct pci_dev *dev, int probe) 3394static int __pci_dev_reset(struct pci_dev *dev, int probe)
3266{ 3395{
3267 int rc; 3396 int rc;
@@ -3284,27 +3413,65 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
3284 if (rc != -ENOTTY) 3413 if (rc != -ENOTTY)
3285 goto done; 3414 goto done;
3286 3415
3416 rc = pci_dev_reset_slot_function(dev, probe);
3417 if (rc != -ENOTTY)
3418 goto done;
3419
3287 rc = pci_parent_bus_reset(dev, probe); 3420 rc = pci_parent_bus_reset(dev, probe);
3288done: 3421done:
3289 return rc; 3422 return rc;
3290} 3423}
3291 3424
3425static void pci_dev_lock(struct pci_dev *dev)
3426{
3427 pci_cfg_access_lock(dev);
3428 /* block PM suspend, driver probe, etc. */
3429 device_lock(&dev->dev);
3430}
3431
3432static void pci_dev_unlock(struct pci_dev *dev)
3433{
3434 device_unlock(&dev->dev);
3435 pci_cfg_access_unlock(dev);
3436}
3437
3438static void pci_dev_save_and_disable(struct pci_dev *dev)
3439{
3440 /*
3441 * Wake-up device prior to save. PM registers default to D0 after
3442 * reset and a simple register restore doesn't reliably return
3443 * to a non-D0 state anyway.
3444 */
3445 pci_set_power_state(dev, PCI_D0);
3446
3447 pci_save_state(dev);
3448 /*
3449 * Disable the device by clearing the Command register, except for
3450 * INTx-disable which is set. This not only disables MMIO and I/O port
3451 * BARs, but also prevents the device from being Bus Master, preventing
3452 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
3453 * compliant devices, INTx-disable prevents legacy interrupts.
3454 */
3455 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3456}
3457
3458static void pci_dev_restore(struct pci_dev *dev)
3459{
3460 pci_restore_state(dev);
3461}
3462
3292static int pci_dev_reset(struct pci_dev *dev, int probe) 3463static int pci_dev_reset(struct pci_dev *dev, int probe)
3293{ 3464{
3294 int rc; 3465 int rc;
3295 3466
3296 if (!probe) { 3467 if (!probe)
3297 pci_cfg_access_lock(dev); 3468 pci_dev_lock(dev);
3298 /* block PM suspend, driver probe, etc. */
3299 device_lock(&dev->dev);
3300 }
3301 3469
3302 rc = __pci_dev_reset(dev, probe); 3470 rc = __pci_dev_reset(dev, probe);
3303 3471
3304 if (!probe) { 3472 if (!probe)
3305 device_unlock(&dev->dev); 3473 pci_dev_unlock(dev);
3306 pci_cfg_access_unlock(dev); 3474
3307 }
3308 return rc; 3475 return rc;
3309} 3476}
3310/** 3477/**
@@ -3395,22 +3562,249 @@ int pci_reset_function(struct pci_dev *dev)
3395 if (rc) 3562 if (rc)
3396 return rc; 3563 return rc;
3397 3564
3398 pci_save_state(dev); 3565 pci_dev_save_and_disable(dev);
3399
3400 /*
3401 * both INTx and MSI are disabled after the Interrupt Disable bit
3402 * is set and the Bus Master bit is cleared.
3403 */
3404 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3405 3566
3406 rc = pci_dev_reset(dev, 0); 3567 rc = pci_dev_reset(dev, 0);
3407 3568
3408 pci_restore_state(dev); 3569 pci_dev_restore(dev);
3409 3570
3410 return rc; 3571 return rc;
3411} 3572}
3412EXPORT_SYMBOL_GPL(pci_reset_function); 3573EXPORT_SYMBOL_GPL(pci_reset_function);
3413 3574
3575/* Lock devices from the top of the tree down */
3576static void pci_bus_lock(struct pci_bus *bus)
3577{
3578 struct pci_dev *dev;
3579
3580 list_for_each_entry(dev, &bus->devices, bus_list) {
3581 pci_dev_lock(dev);
3582 if (dev->subordinate)
3583 pci_bus_lock(dev->subordinate);
3584 }
3585}
3586
3587/* Unlock devices from the bottom of the tree up */
3588static void pci_bus_unlock(struct pci_bus *bus)
3589{
3590 struct pci_dev *dev;
3591
3592 list_for_each_entry(dev, &bus->devices, bus_list) {
3593 if (dev->subordinate)
3594 pci_bus_unlock(dev->subordinate);
3595 pci_dev_unlock(dev);
3596 }
3597}
3598
3599/* Lock devices from the top of the tree down */
3600static void pci_slot_lock(struct pci_slot *slot)
3601{
3602 struct pci_dev *dev;
3603
3604 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3605 if (!dev->slot || dev->slot != slot)
3606 continue;
3607 pci_dev_lock(dev);
3608 if (dev->subordinate)
3609 pci_bus_lock(dev->subordinate);
3610 }
3611}
3612
3613/* Unlock devices from the bottom of the tree up */
3614static void pci_slot_unlock(struct pci_slot *slot)
3615{
3616 struct pci_dev *dev;
3617
3618 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3619 if (!dev->slot || dev->slot != slot)
3620 continue;
3621 if (dev->subordinate)
3622 pci_bus_unlock(dev->subordinate);
3623 pci_dev_unlock(dev);
3624 }
3625}
3626
3627/* Save and disable devices from the top of the tree down */
3628static void pci_bus_save_and_disable(struct pci_bus *bus)
3629{
3630 struct pci_dev *dev;
3631
3632 list_for_each_entry(dev, &bus->devices, bus_list) {
3633 pci_dev_save_and_disable(dev);
3634 if (dev->subordinate)
3635 pci_bus_save_and_disable(dev->subordinate);
3636 }
3637}
3638
3639/*
3640 * Restore devices from top of the tree down - parent bridges need to be
3641 * restored before we can get to subordinate devices.
3642 */
3643static void pci_bus_restore(struct pci_bus *bus)
3644{
3645 struct pci_dev *dev;
3646
3647 list_for_each_entry(dev, &bus->devices, bus_list) {
3648 pci_dev_restore(dev);
3649 if (dev->subordinate)
3650 pci_bus_restore(dev->subordinate);
3651 }
3652}
3653
3654/* Save and disable devices from the top of the tree down */
3655static void pci_slot_save_and_disable(struct pci_slot *slot)
3656{
3657 struct pci_dev *dev;
3658
3659 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3660 if (!dev->slot || dev->slot != slot)
3661 continue;
3662 pci_dev_save_and_disable(dev);
3663 if (dev->subordinate)
3664 pci_bus_save_and_disable(dev->subordinate);
3665 }
3666}
3667
3668/*
3669 * Restore devices from top of the tree down - parent bridges need to be
3670 * restored before we can get to subordinate devices.
3671 */
3672static void pci_slot_restore(struct pci_slot *slot)
3673{
3674 struct pci_dev *dev;
3675
3676 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3677 if (!dev->slot || dev->slot != slot)
3678 continue;
3679 pci_dev_restore(dev);
3680 if (dev->subordinate)
3681 pci_bus_restore(dev->subordinate);
3682 }
3683}
3684
3685static int pci_slot_reset(struct pci_slot *slot, int probe)
3686{
3687 int rc;
3688
3689 if (!slot)
3690 return -ENOTTY;
3691
3692 if (!probe)
3693 pci_slot_lock(slot);
3694
3695 might_sleep();
3696
3697 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
3698
3699 if (!probe)
3700 pci_slot_unlock(slot);
3701
3702 return rc;
3703}
3704
3705/**
3706 * pci_probe_reset_slot - probe whether a PCI slot can be reset
3707 * @slot: PCI slot to probe
3708 *
3709 * Return 0 if slot can be reset, negative if a slot reset is not supported.
3710 */
3711int pci_probe_reset_slot(struct pci_slot *slot)
3712{
3713 return pci_slot_reset(slot, 1);
3714}
3715EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
3716
3717/**
3718 * pci_reset_slot - reset a PCI slot
3719 * @slot: PCI slot to reset
3720 *
3721 * A PCI bus may host multiple slots, each slot may support a reset mechanism
3722 * independent of other slots. For instance, some slots may support slot power
3723 * control. In the case of a 1:1 bus to slot architecture, this function may
3724 * wrap the bus reset to avoid spurious slot related events such as hotplug.
3725 * Generally a slot reset should be attempted before a bus reset. All of the
3726 * function of the slot and any subordinate buses behind the slot are reset
3727 * through this function. PCI config space of all devices in the slot and
3728 * behind the slot is saved before and restored after reset.
3729 *
3730 * Return 0 on success, non-zero on error.
3731 */
3732int pci_reset_slot(struct pci_slot *slot)
3733{
3734 int rc;
3735
3736 rc = pci_slot_reset(slot, 1);
3737 if (rc)
3738 return rc;
3739
3740 pci_slot_save_and_disable(slot);
3741
3742 rc = pci_slot_reset(slot, 0);
3743
3744 pci_slot_restore(slot);
3745
3746 return rc;
3747}
3748EXPORT_SYMBOL_GPL(pci_reset_slot);
3749
3750static int pci_bus_reset(struct pci_bus *bus, int probe)
3751{
3752 if (!bus->self)
3753 return -ENOTTY;
3754
3755 if (probe)
3756 return 0;
3757
3758 pci_bus_lock(bus);
3759
3760 might_sleep();
3761
3762 pci_reset_bridge_secondary_bus(bus->self);
3763
3764 pci_bus_unlock(bus);
3765
3766 return 0;
3767}
3768
3769/**
3770 * pci_probe_reset_bus - probe whether a PCI bus can be reset
3771 * @bus: PCI bus to probe
3772 *
3773 * Return 0 if bus can be reset, negative if a bus reset is not supported.
3774 */
3775int pci_probe_reset_bus(struct pci_bus *bus)
3776{
3777 return pci_bus_reset(bus, 1);
3778}
3779EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
3780
3781/**
3782 * pci_reset_bus - reset a PCI bus
3783 * @bus: top level PCI bus to reset
3784 *
3785 * Do a bus reset on the given bus and any subordinate buses, saving
3786 * and restoring state of all devices.
3787 *
3788 * Return 0 on success, non-zero on error.
3789 */
3790int pci_reset_bus(struct pci_bus *bus)
3791{
3792 int rc;
3793
3794 rc = pci_bus_reset(bus, 1);
3795 if (rc)
3796 return rc;
3797
3798 pci_bus_save_and_disable(bus);
3799
3800 rc = pci_bus_reset(bus, 0);
3801
3802 pci_bus_restore(bus);
3803
3804 return rc;
3805}
3806EXPORT_SYMBOL_GPL(pci_reset_bus);
3807
3414/** 3808/**
3415 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 3809 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3416 * @dev: PCI device to query 3810 * @dev: PCI device to query
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d1182c4a754e..816c297f170c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -151,7 +151,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
151 151
152} 152}
153extern struct device_attribute pci_dev_attrs[]; 153extern struct device_attribute pci_dev_attrs[];
154extern struct device_attribute pcibus_dev_attrs[]; 154extern const struct attribute_group *pcibus_groups[];
155extern struct device_type pci_dev_type; 155extern struct device_type pci_dev_type;
156extern struct bus_attribute pci_bus_attrs[]; 156extern struct bus_attribute pci_bus_attrs[];
157 157
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 569f82fc9e22..a82e70a41039 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -2,7 +2,7 @@
2# PCI Express Port Bus Configuration 2# PCI Express Port Bus Configuration
3# 3#
4config PCIEPORTBUS 4config PCIEPORTBUS
5 bool "PCI Express support" 5 bool "PCI Express Port Bus support"
6 depends on PCI 6 depends on PCI
7 help 7 help
8 This automatically enables PCI Express Port Bus support. Users can 8 This automatically enables PCI Express Port Bus support. Users can
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 76ef634caf6f..0bf82a20a0fb 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -352,7 +352,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
352 reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; 352 reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
353 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); 353 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
354 354
355 aer_do_secondary_bus_reset(dev); 355 pci_reset_bridge_secondary_bus(dev);
356 dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); 356 dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
357 357
358 /* Clear Root Error Status */ 358 /* Clear Root Error Status */
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 90ea3e88041f..84420b7c9456 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -106,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
106} 106}
107 107
108extern struct bus_type pcie_port_bus_type; 108extern struct bus_type pcie_port_bus_type;
109void aer_do_secondary_bus_reset(struct pci_dev *dev);
110int aer_init(struct pcie_device *dev); 109int aer_init(struct pcie_device *dev);
111void aer_isr(struct work_struct *work); 110void aer_isr(struct work_struct *work);
112void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 111void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 8b68ae59b7b6..85ca36f2136d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -367,39 +367,6 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
367} 367}
368 368
369/** 369/**
370 * aer_do_secondary_bus_reset - perform secondary bus reset
371 * @dev: pointer to bridge's pci_dev data structure
372 *
373 * Invoked when performing link reset at Root Port or Downstream Port.
374 */
375void aer_do_secondary_bus_reset(struct pci_dev *dev)
376{
377 u16 p2p_ctrl;
378
379 /* Assert Secondary Bus Reset */
380 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
381 p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
382 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
383
384 /*
385 * we should send hot reset message for 2ms to allow it time to
386 * propagate to all downstream ports
387 */
388 msleep(2);
389
390 /* De-assert Secondary Bus Reset */
391 p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
392 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
393
394 /*
395 * System software must wait for at least 100ms from the end
396 * of a reset of one or more device before it is permitted
397 * to issue Configuration Requests to those devices.
398 */
399 msleep(200);
400}
401
402/**
403 * default_reset_link - default reset function 370 * default_reset_link - default reset function
404 * @dev: pointer to pci_dev data structure 371 * @dev: pointer to pci_dev data structure
405 * 372 *
@@ -408,7 +375,7 @@ void aer_do_secondary_bus_reset(struct pci_dev *dev)
408 */ 375 */
409static pci_ers_result_t default_reset_link(struct pci_dev *dev) 376static pci_ers_result_t default_reset_link(struct pci_dev *dev)
410{ 377{
411 aer_do_secondary_bus_reset(dev); 378 pci_reset_bridge_secondary_bus(dev);
412 dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n"); 379 dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
413 return PCI_ERS_RESULT_RECOVERED; 380 return PCI_ERS_RESULT_RECOVERED;
414} 381}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 85c114cd91cc..4ab388a6cc26 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -96,7 +96,7 @@ static void release_pcibus_dev(struct device *dev)
96static struct class pcibus_class = { 96static struct class pcibus_class = {
97 .name = "pci_bus", 97 .name = "pci_bus",
98 .dev_release = &release_pcibus_dev, 98 .dev_release = &release_pcibus_dev,
99 .dev_attrs = pcibus_dev_attrs, 99 .dev_groups = pcibus_groups,
100}; 100};
101 101
102static int __init pcibus_class_init(void) 102static int __init pcibus_class_init(void)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e85d23044ae0..f6c31fabf3af 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3126,9 +3126,6 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
3126 3126
3127static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) 3127static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
3128{ 3128{
3129 int i;
3130 u16 status;
3131
3132 /* 3129 /*
3133 * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf 3130 * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
3134 * 3131 *
@@ -3140,20 +3137,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
3140 if (probe) 3137 if (probe)
3141 return 0; 3138 return 0;
3142 3139
3143 /* Wait for Transaction Pending bit clean */ 3140 if (!pci_wait_for_pending_transaction(dev))
3144 for (i = 0; i < 4; i++) { 3141 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3145 if (i)
3146 msleep((1 << (i - 1)) * 100);
3147
3148 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3149 if (!(status & PCI_EXP_DEVSTA_TRPND))
3150 goto clear;
3151 }
3152
3153 dev_err(&dev->dev, "transaction is not cleared; "
3154 "proceeding with reset anyway\n");
3155 3142
3156clear:
3157 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3143 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3158 3144
3159 msleep(100); 3145 msleep(100);
@@ -3208,6 +3194,83 @@ reset_complete:
3208 return 0; 3194 return 0;
3209} 3195}
3210 3196
3197/*
3198 * Device-specific reset method for Chelsio T4-based adapters.
3199 */
3200static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
3201{
3202 u16 old_command;
3203 u16 msix_flags;
3204
3205 /*
3206 * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
3207 * that we have no device-specific reset method.
3208 */
3209 if ((dev->device & 0xf000) != 0x4000)
3210 return -ENOTTY;
3211
3212 /*
3213 * If this is the "probe" phase, return 0 indicating that we can
3214 * reset this device.
3215 */
3216 if (probe)
3217 return 0;
3218
3219 /*
3220 * T4 can wedge if there are DMAs in flight within the chip and Bus
3221 * Master has been disabled. We need to have it on till the Function
3222 * Level Reset completes. (BUS_MASTER is disabled in
3223 * pci_reset_function()).
3224 */
3225 pci_read_config_word(dev, PCI_COMMAND, &old_command);
3226 pci_write_config_word(dev, PCI_COMMAND,
3227 old_command | PCI_COMMAND_MASTER);
3228
3229 /*
3230 * Perform the actual device function reset, saving and restoring
3231 * configuration information around the reset.
3232 */
3233 pci_save_state(dev);
3234
3235 /*
3236 * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
3237 * are disabled when an MSI-X interrupt message needs to be delivered.
3238 * So we briefly re-enable MSI-X interrupts for the duration of the
3239 * FLR. The pci_restore_state() below will restore the original
3240 * MSI-X state.
3241 */
3242 pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
3243 if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
3244 pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
3245 msix_flags |
3246 PCI_MSIX_FLAGS_ENABLE |
3247 PCI_MSIX_FLAGS_MASKALL);
3248
3249 /*
3250 * Start of pcie_flr() code sequence. This reset code is a copy of
3251 * the guts of pcie_flr() because that's not an exported function.
3252 */
3253
3254 if (!pci_wait_for_pending_transaction(dev))
3255 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3256
3257 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3258 msleep(100);
3259
3260 /*
3261 * End of pcie_flr() code sequence.
3262 */
3263
3264 /*
3265 * Restore the configuration information (BAR values, etc.) including
3266 * the original PCI Configuration Space Command word, and return
3267 * success.
3268 */
3269 pci_restore_state(dev);
3270 pci_write_config_word(dev, PCI_COMMAND, old_command);
3271 return 0;
3272}
3273
3211#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed 3274#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
3212#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156 3275#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
3213#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166 3276#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
@@ -3221,6 +3284,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
3221 reset_ivb_igd }, 3284 reset_ivb_igd },
3222 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, 3285 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
3223 reset_intel_generic_dev }, 3286 reset_intel_generic_dev },
3287 { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
3288 reset_chelsio_generic_dev },
3224 { 0 } 3289 { 0 }
3225}; 3290};
3226 3291
@@ -3295,11 +3360,61 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
3295 return pci_dev_get(dev); 3360 return pci_dev_get(dev);
3296} 3361}
3297 3362
3363/*
3364 * AMD has indicated that the devices below do not support peer-to-peer
3365 * in any system where they are found in the southbridge with an AMD
3366 * IOMMU in the system. Multifunction devices that do not support
3367 * peer-to-peer between functions can claim to support a subset of ACS.
3368 * Such devices effectively enable request redirect (RR) and completion
3369 * redirect (CR) since all transactions are redirected to the upstream
3370 * root complex.
3371 *
3372 * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
3373 * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
3374 * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
3375 *
3376 * 1002:4385 SBx00 SMBus Controller
3377 * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
3378 * 1002:4383 SBx00 Azalia (Intel HDA)
3379 * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
3380 * 1002:4384 SBx00 PCI to PCI Bridge
3381 * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
3382 */
3383static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
3384{
3385#ifdef CONFIG_ACPI
3386 struct acpi_table_header *header = NULL;
3387 acpi_status status;
3388
3389 /* Targeting multifunction devices on the SB (appears on root bus) */
3390 if (!dev->multifunction || !pci_is_root_bus(dev->bus))
3391 return -ENODEV;
3392
3393 /* The IVRS table describes the AMD IOMMU */
3394 status = acpi_get_table("IVRS", 0, &header);
3395 if (ACPI_FAILURE(status))
3396 return -ENODEV;
3397
3398 /* Filter out flags not applicable to multifunction */
3399 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
3400
3401 return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
3402#else
3403 return -ENODEV;
3404#endif
3405}
3406
/*
 * Table of device-specific ACS tests keyed by PCI vendor/device ID.
 * A matching callback is given the requested ACS flags and returns
 * 1 if the device effectively provides them, 0 if it does not, or a
 * negative errno (e.g. -ENODEV) when the quirk does not apply to the
 * device at hand.  The all-zero entry terminates the table.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;
	u16 device;
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	/* AMD/ATI SBx00-family southbridge functions (no peer-to-peer) */
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ 0 }	/* sentinel */
};
3305 3420
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 94b777d108bb..9be359594b05 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -747,14 +747,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
747{ 747{
748 struct pci_dev *dev; 748 struct pci_dev *dev;
749 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 749 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
750 unsigned long size = 0, size0 = 0, size1 = 0; 750 resource_size_t size = 0, size0 = 0, size1 = 0;
751 resource_size_t children_add_size = 0; 751 resource_size_t children_add_size = 0;
752 resource_size_t min_align, io_align, align; 752 resource_size_t min_align, align;
753 753
754 if (!b_res) 754 if (!b_res)
755 return; 755 return;
756 756
757 io_align = min_align = window_alignment(bus, IORESOURCE_IO); 757 min_align = window_alignment(bus, IORESOURCE_IO);
758 list_for_each_entry(dev, &bus->devices, bus_list) { 758 list_for_each_entry(dev, &bus->devices, bus_list) {
759 int i; 759 int i;
760 760
@@ -781,9 +781,6 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
781 } 781 }
782 } 782 }
783 783
784 if (min_align > io_align)
785 min_align = io_align;
786
787 size0 = calculate_iosize(size, min_size, size1, 784 size0 = calculate_iosize(size, min_size, size1,
788 resource_size(b_res), min_align); 785 resource_size(b_res), min_align);
789 if (children_add_size > add_size) 786 if (children_add_size > add_size)
@@ -807,8 +804,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
807 add_to_list(realloc_head, bus->self, b_res, size1-size0, 804 add_to_list(realloc_head, bus->self, b_res, size1-size0,
808 min_align); 805 min_align);
809 dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window " 806 dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
810 "%pR to %pR add_size %lx\n", b_res, 807 "%pR to %pR add_size %llx\n", b_res,
811 &bus->busn_res, size1-size0); 808 &bus->busn_res,
809 (unsigned long long)size1-size0);
812 } 810 }
813} 811}
814 812
@@ -838,6 +836,8 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
838 * pbus_size_mem() - size the memory window of a given bus 836 * pbus_size_mem() - size the memory window of a given bus
839 * 837 *
840 * @bus : the bus 838 * @bus : the bus
 839 * @mask: bitmask applied to a resource's flags before comparing with @type
 840 * @type: the resource type that a free bridge window must match
 841 * @min_size : the minimum memory window that must be allocated 841 * @min_size : the minimum memory window that must be allocated
842 * @add_size : additional optional memory window 842 * @add_size : additional optional memory window
843 * @realloc_head : track the additional memory window on this list 843 * @realloc_head : track the additional memory window on this list
@@ -1300,15 +1300,12 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
1300static int pci_bus_get_depth(struct pci_bus *bus) 1300static int pci_bus_get_depth(struct pci_bus *bus)
1301{ 1301{
1302 int depth = 0; 1302 int depth = 0;
1303 struct pci_dev *dev; 1303 struct pci_bus *child_bus;
1304 1304
1305 list_for_each_entry(dev, &bus->devices, bus_list) { 1305 list_for_each_entry(child_bus, &bus->children, node){
1306 int ret; 1306 int ret;
1307 struct pci_bus *b = dev->subordinate;
1308 if (!b)
1309 continue;
1310 1307
1311 ret = pci_bus_get_depth(b); 1308 ret = pci_bus_get_depth(child_bus);
1312 if (ret + 1 > depth) 1309 if (ret + 1 > depth)
1313 depth = ret + 1; 1310 depth = ret + 1;
1314 } 1311 }
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e494c90a00d1..82d1c78d3d91 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -914,6 +914,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev);
914void pci_msi_off(struct pci_dev *dev); 914void pci_msi_off(struct pci_dev *dev);
915int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); 915int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
916int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); 916int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
917int pci_wait_for_pending_transaction(struct pci_dev *dev);
917int pcix_get_max_mmrbc(struct pci_dev *dev); 918int pcix_get_max_mmrbc(struct pci_dev *dev);
918int pcix_get_mmrbc(struct pci_dev *dev); 919int pcix_get_mmrbc(struct pci_dev *dev);
919int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); 920int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
@@ -924,6 +925,11 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
924int __pci_reset_function(struct pci_dev *dev); 925int __pci_reset_function(struct pci_dev *dev);
925int __pci_reset_function_locked(struct pci_dev *dev); 926int __pci_reset_function_locked(struct pci_dev *dev);
926int pci_reset_function(struct pci_dev *dev); 927int pci_reset_function(struct pci_dev *dev);
928int pci_probe_reset_slot(struct pci_slot *slot);
929int pci_reset_slot(struct pci_slot *slot);
930int pci_probe_reset_bus(struct pci_bus *bus);
931int pci_reset_bus(struct pci_bus *bus);
932void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
927void pci_update_resource(struct pci_dev *dev, int resno); 933void pci_update_resource(struct pci_dev *dev, int resno);
928int __must_check pci_assign_resource(struct pci_dev *dev, int i); 934int __must_check pci_assign_resource(struct pci_dev *dev, int i);
929int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); 935int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8db71dcd6337..bd32109e607e 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -63,6 +63,9 @@ enum pcie_link_width {
 63 * @get_adapter_status: Called to see if an adapter is present in the slot or not. 63 * @get_adapter_status: Called to see if an adapter is present in the slot or not.
64 * If this field is NULL, the value passed in the struct hotplug_slot_info 64 * If this field is NULL, the value passed in the struct hotplug_slot_info
65 * will be used when this value is requested by a user. 65 * will be used when this value is requested by a user.
66 * @reset_slot: Optional interface to allow override of a bus reset for the
67 * slot for cases where a secondary bus reset can result in spurious
68 * hotplug events or where a slot can be reset independent of the bus.
66 * 69 *
67 * The table of function pointers that is passed to the hotplug pci core by a 70 * The table of function pointers that is passed to the hotplug pci core by a
68 * hotplug pci driver. These functions are called by the hotplug pci core when 71 * hotplug pci driver. These functions are called by the hotplug pci core when
@@ -80,6 +83,7 @@ struct hotplug_slot_ops {
80 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); 83 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
81 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); 84 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
82 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); 85 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
86 int (*reset_slot) (struct hotplug_slot *slot, int probe);
83}; 87};
84 88
85/** 89/**