path: root/arch/arm/mach-tegra/pcie.c
Diffstat (limited to 'arch/arm/mach-tegra/pcie.c')
-rw-r--r--  arch/arm/mach-tegra/pcie.c  1141
1 files changed, 874 insertions, 267 deletions
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index 2941212b853..f6195bd31d1 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * arch/arm/mach-tegra/pci.c 2 * arch/arm/mach-tegra/pcie.c
3 * 3 *
4 * PCIe host controller driver for TEGRA(2) SOCs 4 * PCIe host controller driver for TEGRA SOCs
5 * 5 *
6 * Copyright (c) 2010, CompuLab, Ltd. 6 * Copyright (c) 2010, CompuLab, Ltd.
7 * Author: Mike Rapoport <mike@compulab.co.il> 7 * Author: Mike Rapoport <mike@compulab.co.il>
8 * 8 *
9 * Based on NVIDIA PCIe driver 9 * Based on NVIDIA PCIe driver
10 * Copyright (c) 2008-2009, NVIDIA Corporation. 10 * Copyright (c) 2008-2012, NVIDIA Corporation.
11 * 11 *
12 * Bits taken from arch/arm/mach-dove/pcie.c 12 * Bits taken from arch/arm/mach-dove/pcie.c
13 * 13 *
@@ -32,6 +32,10 @@
32#include <linux/irq.h> 32#include <linux/irq.h>
33#include <linux/clk.h> 33#include <linux/clk.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/msi.h>
36#include <linux/slab.h>
37#include <linux/platform_device.h>
38#include <linux/regulator/consumer.h>
35 39
36#include <asm/sizes.h> 40#include <asm/sizes.h>
37#include <asm/mach/pci.h> 41#include <asm/mach/pci.h>
@@ -40,120 +44,176 @@
40#include <mach/iomap.h> 44#include <mach/iomap.h>
41#include <mach/clk.h> 45#include <mach/clk.h>
42#include <mach/powergate.h> 46#include <mach/powergate.h>
47#include <mach/pci.h>
48
49#define MSELECT_CONFIG_0_ENABLE_PCIE_APERTURE 5
50
51#define PINMUX_AUX_PEX_L0_RST_N_0 0x33bc
52#define PINMUX_AUX_PEX_L0_RST_N_0_E_INPUT 5
53#define PINMUX_AUX_PEX_L0_RST_N_0_E_INPUT_ENABLE 1
54
55#define PINMUX_AUX_PEX_L1_RST_N_0 0x33cc
56#define PINMUX_AUX_PEX_L1_RST_N_0_E_INPUT 5
57#define PINMUX_AUX_PEX_L1_RST_N_0_E_INPUT_ENABLE 1
58
59#define PINMUX_AUX_PEX_L2_RST_N_0 0x33d8
60#define PINMUX_AUX_PEX_L2_RST_N_0_E_INPUT 5
61#define PINMUX_AUX_PEX_L2_RST_N_0_E_INPUT_ENABLE 1
62#define AFI_PEX0_CTRL_0_PEX0_CLKREQ_EN 1
63#define NV_PCIE2_PADS_REFCLK_CFG1 0x000000cc
64#define APBDEV_PMC_SCRATCH42_0_PCX_CLAMP_MASK 0x1
65
66
67#define AFI_MSI_VEC0_0 0x6c
68#define AFI_MSI_VEC1_0 0x70
69#define AFI_MSI_VEC2_0 0x74
70#define AFI_MSI_VEC3_0 0x78
71#define AFI_MSI_VEC4_0 0x7c
72#define AFI_MSI_VEC5_0 0x80
73#define AFI_MSI_VEC6_0 0x84
74#define AFI_MSI_VEC7_0 0x88
75
76#define AFI_MSI_EN_VEC0_0 0x8c
77#define AFI_MSI_EN_VEC1_0 0x90
78#define AFI_MSI_EN_VEC2_0 0x94
79#define AFI_MSI_EN_VEC3_0 0x98
80#define AFI_MSI_EN_VEC4_0 0x9c
81#define AFI_MSI_EN_VEC5_0 0xa0
82#define AFI_MSI_EN_VEC6_0 0xa4
83#define AFI_MSI_EN_VEC7_0 0xa8
84
85#define AFI_MSI_FPCI_BAR_ST_0 0x64
86#define AFI_MSI_BAR_SZ_0 0x60
87#define AFI_MSI_AXI_BAR_ST_0 0x68
88#define AFI_INTR_MASK_0 0xb4
89#define AFI_INTR_MASK_0_INT_MASK 0
90#define AFI_INTR_MASK_0_MSI_MASK 8
91
92
93#define AFI_PEXBIAS_CTRL_0 0x168
94
43 95
44/* register definitions */ 96/* register definitions */
45#define AFI_OFFSET 0x3800 97#define AFI_OFFSET 0x3800
46#define PADS_OFFSET 0x3000 98#define PADS_OFFSET 0x3000
47#define RP0_OFFSET 0x0000 99#define RP_OFFSET 0x1000
48#define RP1_OFFSET 0x1000 100
49 101#define AFI_AXI_BAR0_SZ 0x00
50#define AFI_AXI_BAR0_SZ 0x00 102#define AFI_AXI_BAR1_SZ 0x04
51#define AFI_AXI_BAR1_SZ 0x04 103#define AFI_AXI_BAR2_SZ 0x08
52#define AFI_AXI_BAR2_SZ 0x08 104#define AFI_AXI_BAR3_SZ 0x0c
53#define AFI_AXI_BAR3_SZ 0x0c 105#define AFI_AXI_BAR4_SZ 0x10
54#define AFI_AXI_BAR4_SZ 0x10 106#define AFI_AXI_BAR5_SZ 0x14
55#define AFI_AXI_BAR5_SZ 0x14 107
56 108#define AFI_AXI_BAR0_START 0x18
57#define AFI_AXI_BAR0_START 0x18 109#define AFI_AXI_BAR1_START 0x1c
58#define AFI_AXI_BAR1_START 0x1c 110#define AFI_AXI_BAR2_START 0x20
59#define AFI_AXI_BAR2_START 0x20 111#define AFI_AXI_BAR3_START 0x24
60#define AFI_AXI_BAR3_START 0x24 112#define AFI_AXI_BAR4_START 0x28
61#define AFI_AXI_BAR4_START 0x28 113#define AFI_AXI_BAR5_START 0x2c
62#define AFI_AXI_BAR5_START 0x2c 114
63 115#define AFI_FPCI_BAR0 0x30
64#define AFI_FPCI_BAR0 0x30 116#define AFI_FPCI_BAR1 0x34
65#define AFI_FPCI_BAR1 0x34 117#define AFI_FPCI_BAR2 0x38
66#define AFI_FPCI_BAR2 0x38 118#define AFI_FPCI_BAR3 0x3c
67#define AFI_FPCI_BAR3 0x3c 119#define AFI_FPCI_BAR4 0x40
68#define AFI_FPCI_BAR4 0x40 120#define AFI_FPCI_BAR5 0x44
69#define AFI_FPCI_BAR5 0x44 121
70 122#define AFI_CACHE_BAR0_SZ 0x48
71#define AFI_CACHE_BAR0_SZ 0x48 123#define AFI_CACHE_BAR0_ST 0x4c
72#define AFI_CACHE_BAR0_ST 0x4c 124#define AFI_CACHE_BAR1_SZ 0x50
73#define AFI_CACHE_BAR1_SZ 0x50 125#define AFI_CACHE_BAR1_ST 0x54
74#define AFI_CACHE_BAR1_ST 0x54 126
75 127#define AFI_MSI_BAR_SZ 0x60
76#define AFI_MSI_BAR_SZ 0x60 128#define AFI_MSI_FPCI_BAR_ST 0x64
77#define AFI_MSI_FPCI_BAR_ST 0x64 129#define AFI_MSI_AXI_BAR_ST 0x68
78#define AFI_MSI_AXI_BAR_ST 0x68 130
79 131#define AFI_CONFIGURATION 0xac
80#define AFI_CONFIGURATION 0xac 132#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
81#define AFI_CONFIGURATION_EN_FPCI (1 << 0) 133
82 134#define AFI_FPCI_ERROR_MASKS 0xb0
83#define AFI_FPCI_ERROR_MASKS 0xb0 135
84 136#define AFI_INTR_MASK 0xb4
85#define AFI_INTR_MASK 0xb4 137#define AFI_INTR_MASK_INT_MASK (1 << 0)
86#define AFI_INTR_MASK_INT_MASK (1 << 0) 138#define AFI_INTR_MASK_MSI_MASK (1 << 8)
87#define AFI_INTR_MASK_MSI_MASK (1 << 8) 139
88 140#define AFI_INTR_CODE 0xb8
89#define AFI_INTR_CODE 0xb8 141#define AFI_INTR_CODE_MASK 0xf
90#define AFI_INTR_CODE_MASK 0xf 142#define AFI_INTR_MASTER_ABORT 4
91#define AFI_INTR_MASTER_ABORT 4 143#define AFI_INTR_LEGACY 6
92#define AFI_INTR_LEGACY 6 144
93 145#define AFI_INTR_SIGNATURE 0xbc
94#define AFI_INTR_SIGNATURE 0xbc 146#define AFI_SM_INTR_ENABLE 0xc4
95#define AFI_SM_INTR_ENABLE 0xc4 147
96 148#define AFI_AFI_INTR_ENABLE 0xc8
97#define AFI_AFI_INTR_ENABLE 0xc8 149#define AFI_INTR_EN_INI_SLVERR (1 << 0)
98#define AFI_INTR_EN_INI_SLVERR (1 << 0) 150#define AFI_INTR_EN_INI_DECERR (1 << 1)
99#define AFI_INTR_EN_INI_DECERR (1 << 1) 151#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
100#define AFI_INTR_EN_TGT_SLVERR (1 << 2) 152#define AFI_INTR_EN_TGT_DECERR (1 << 3)
101#define AFI_INTR_EN_TGT_DECERR (1 << 3) 153#define AFI_INTR_EN_TGT_WRERR (1 << 4)
102#define AFI_INTR_EN_TGT_WRERR (1 << 4) 154#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
103#define AFI_INTR_EN_DFPCI_DECERR (1 << 5) 155#define AFI_INTR_EN_AXI_DECERR (1 << 6)
104#define AFI_INTR_EN_AXI_DECERR (1 << 6) 156#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
105#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) 157#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
106 158
107#define AFI_PCIE_CONFIG 0x0f8 159#define AFI_PCIE_CONFIG 0x0f8
108#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1) 160#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
109#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2) 161#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
110#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) 162#define AFI_PCIE_CONFIG_PCIEC2_DISABLE_DEVICE (1 << 3)
111#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) 163#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
112#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) 164#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
113 165#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
114#define AFI_FUSE 0x104 166#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
115#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) 167
116 168#define AFI_FUSE 0x104
117#define AFI_PEX0_CTRL 0x110 169#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
118#define AFI_PEX1_CTRL 0x118 170
119#define AFI_PEX_CTRL_RST (1 << 0) 171#define AFI_PEX0_CTRL 0x110
120#define AFI_PEX_CTRL_REFCLK_EN (1 << 3) 172#define AFI_PEX1_CTRL 0x118
121 173#define AFI_PEX2_CTRL 0x128
122#define RP_VEND_XP 0x00000F00 174#define AFI_PEX_CTRL_RST (1 << 0)
123#define RP_VEND_XP_DL_UP (1 << 30) 175#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
124 176
125#define RP_LINK_CONTROL_STATUS 0x00000090 177#define RP_VEND_XP 0x00000F00
126#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 178#define RP_VEND_XP_DL_UP (1 << 30)
127 179
128#define PADS_CTL_SEL 0x0000009C 180#define RP_LINK_CONTROL_STATUS 0x00000090
129 181#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
130#define PADS_CTL 0x000000A0 182
131#define PADS_CTL_IDDQ_1L (1 << 0) 183#define PADS_CTL_SEL 0x0000009C
132#define PADS_CTL_TX_DATA_EN_1L (1 << 6) 184
133#define PADS_CTL_RX_DATA_EN_1L (1 << 10) 185#define PADS_CTL 0x000000A0
134 186#define PADS_CTL_IDDQ_1L (1 << 0)
135#define PADS_PLL_CTL 0x000000B8 187#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
136#define PADS_PLL_CTL_RST_B4SM (1 << 1) 188#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
137#define PADS_PLL_CTL_LOCKDET (1 << 8) 189
138#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) 190#ifdef CONFIG_ARCH_TEGRA_2x_SOC
139#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) 191#define PADS_PLL_CTL 0x000000B8
140#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) 192#else
141#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) 193#define PADS_PLL_CTL 0x000000B4
142#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) 194#endif
143#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) 195#define PADS_PLL_CTL_RST_B4SM (1 << 1)
144#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) 196#define PADS_PLL_CTL_LOCKDET (1 << 8)
197#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
198#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
199#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
200#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
201#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
202#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
203#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
204#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
145 205
146/* PMC access is required for PCIE xclk (un)clamping */ 206/* PMC access is required for PCIE xclk (un)clamping */
147#define PMC_SCRATCH42 0x144 207#define PMC_SCRATCH42 0x144
148#define PMC_SCRATCH42_PCX_CLAMP (1 << 0) 208#define PMC_SCRATCH42_PCX_CLAMP (1 << 0)
149 209
150static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE); 210#define NV_PCIE2_RP_TXBA1 0x00000E1C /* RW-4R */
151 211
152#define pmc_writel(value, reg) \ 212#define NV_PCIE2_RP_PRIV_MISC 0x00000FE0
153 __raw_writel(value, (u32)reg_pmc_base + (reg)) 213#define PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE 1 << 23
154#define pmc_readl(reg) \ 214#define PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE 1 << 31
155 __raw_readl((u32)reg_pmc_base + (reg))
156 215
216#ifdef CONFIG_ARCH_TEGRA_2x_SOC
157/* 217/*
158 * Tegra2 defines 1GB in the AXI address map for PCIe. 218 * Tegra2 defines 1GB in the AXI address map for PCIe.
159 * 219 *
@@ -181,17 +241,58 @@ static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
181#define MMIO_BASE (TEGRA_PCIE_BASE + SZ_4M) 241#define MMIO_BASE (TEGRA_PCIE_BASE + SZ_4M)
182#define MMIO_SIZE SZ_64K 242#define MMIO_SIZE SZ_64K
183#define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M) 243#define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M)
184#define MEM_SIZE_0 SZ_128M 244#define MEM_SIZE SZ_256M
185#define MEM_BASE_1 (MEM_BASE_0 + MEM_SIZE_0) 245#define PREFETCH_MEM_BASE_0 (MEM_BASE_0 + MEM_SIZE)
186#define MEM_SIZE_1 SZ_128M 246#define PREFETCH_MEM_SIZE SZ_512M
187#define PREFETCH_MEM_BASE_0 (MEM_BASE_1 + MEM_SIZE_1) 247
188#define PREFETCH_MEM_SIZE_0 SZ_128M 248#else
189#define PREFETCH_MEM_BASE_1 (PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0) 249
190#define PREFETCH_MEM_SIZE_1 SZ_128M 250/*
 191                                         251 * AXI address map for the PCIe aperture, defines 1GB in the AXI
192#define PCIE_CONF_BUS(b) ((b) << 16) 252 * address map for PCIe.
193#define PCIE_CONF_DEV(d) ((d) << 11) 253 *
194#define PCIE_CONF_FUNC(f) ((f) << 8) 254 * That address space is split into different regions, with sizes and
 255 * offsets as follows. Except for the Register space, SW is free to slice the
 256 * regions as it chooses.
257 *
258 * The split below seems to work fine for now.
259 *
260 * 0x0000_0000 to 0x00ff_ffff - Register space 16MB.
261 * 0x0100_0000 to 0x01ff_ffff - Config space 16MB.
262 * 0x0200_0000 to 0x02ff_ffff - Extended config space 16MB.
263 * 0x0300_0000 to 0x03ff_ffff - Downstream IO space
264 * ... Will be filled with other BARS like MSI/upstream IO etc.
265 * 0x1000_0000 to 0x1fff_ffff - non-prefetchable memory aperture
266 * 0x2000_0000 to 0x3fff_ffff - Prefetchable memory aperture
267 *
 268 * Config and Extended config sizes are chosen to support
 269 * a maximum of 256 devices,
270 * which is good enough for all the current use cases.
271 *
272 */
273#define TEGRA_PCIE_BASE 0x00000000
274
275#define PCIE_REGS_SZ SZ_16M
276#define PCIE_CFG_OFF PCIE_REGS_SZ
277#define PCIE_CFG_SZ SZ_16M
278#define PCIE_EXT_CFG_OFF (PCIE_CFG_SZ + PCIE_CFG_OFF)
279#define PCIE_EXT_CFG_SZ SZ_16M
 280/* During boot, only the register/config and extended config apertures are
 281 * mapped. The rest are mapped on demand by the PCI device drivers.
282 */
283#define PCIE_IOMAP_SZ (PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)
284
285#define MMIO_BASE (TEGRA_PCIE_BASE + SZ_48M)
286#define MMIO_SIZE SZ_1M
287#define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M)
288#define MEM_SIZE SZ_256M
289#define PREFETCH_MEM_BASE_0 (MEM_BASE_0 + MEM_SIZE)
290#define PREFETCH_MEM_SIZE SZ_512M
291#endif
292
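
As a quick consistency check on the values above: PCIE_CFG_OFF works out to 16MB and PCIE_EXT_CFG_OFF to 32MB, so PCIE_IOMAP_SZ (48MB) covers exactly the register, config and extended-config windows described in the comment; MMIO_BASE at TEGRA_PCIE_BASE + 48MB starts right where that boot-time mapping ends, and MEM_BASE_0 (+256MB) and PREFETCH_MEM_BASE_0 (+512MB) line up with the 0x1000_0000 and 0x2000_0000 apertures from the comment.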
293#define PCIE_CONF_BUS(b) ((b) << 16)
294#define PCIE_CONF_DEV(d) ((d) << 11)
295#define PCIE_CONF_FUNC(f) ((f) << 8)
195#define PCIE_CONF_REG(r) \ 296#define PCIE_CONF_REG(r) \
196 (((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF)) 297 (((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))
197 298
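
To illustrate how these macros compose an offset into the config aperture, here is a minimal user-space sketch. It is not from the driver: the PCIE_CFG_OFF/PCIE_EXT_CFG_OFF values assume the Tegra3 map defined above, and the bus/device/function numbers are hypothetical.

    #include <stdio.h>

    #define PCIE_CFG_OFF        0x01000000  /* SZ_16M, per the Tegra3 map above */
    #define PCIE_EXT_CFG_OFF    0x02000000  /* 32MB */

    #define PCIE_CONF_BUS(b)    ((b) << 16)
    #define PCIE_CONF_DEV(d)    ((d) << 11)
    #define PCIE_CONF_FUNC(f)   ((f) << 8)
    #define PCIE_CONF_REG(r) \
        (((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))

    int main(void)
    {
        /* hypothetical target: bus 1, device 0, function 0, register 0x10 (BAR0) */
        unsigned long off = PCIE_CONF_BUS(1) | PCIE_CONF_DEV(0) |
                            PCIE_CONF_FUNC(0) | PCIE_CONF_REG(0x10);

        printf("config aperture offset = 0x%08lx\n", off);  /* 0x01010010 */
        return 0;
    }

A register number of 256 or more would select the extended-config window (PCIE_EXT_CFG_OFF) instead of the plain config window.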
@@ -209,18 +310,30 @@ struct tegra_pcie_port {
209}; 310};
210 311
211struct tegra_pcie_info { 312struct tegra_pcie_info {
212 struct tegra_pcie_port port[2]; 313 struct tegra_pcie_port port[MAX_PCIE_SUPPORTED_PORTS];
213 int num_ports; 314 int num_ports;
214 315
316 void __iomem *reg_clk_base;
215 void __iomem *regs; 317 void __iomem *regs;
216 struct resource res_mmio; 318 struct resource res_mmio;
319 int power_rails_enabled;
320 int pcie_power_enabled;
217 321
218 struct clk *pex_clk; 322 struct regulator *regulator_hvdd;
219 struct clk *afi_clk; 323 struct regulator *regulator_pexio;
324 struct regulator *regulator_avdd_plle;
220 struct clk *pcie_xclk; 325 struct clk *pcie_xclk;
221 struct clk *pll_e; 326 struct clk *pll_e;
327 struct tegra_pci_platform_data *plat_data;
222}; 328};
223 329
330#define pmc_writel(value, reg) \
331 __raw_writel(value, (u32)reg_pmc_base + (reg))
332#define pmc_readl(reg) \
333 __raw_readl((u32)reg_pmc_base + (reg))
334
335static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);
336
224static struct tegra_pcie_info tegra_pcie = { 337static struct tegra_pcie_info tegra_pcie = {
225 .res_mmio = { 338 .res_mmio = {
226 .name = "PCI IO", 339 .name = "PCI IO",
@@ -230,6 +343,15 @@ static struct tegra_pcie_info tegra_pcie = {
230 }, 343 },
231}; 344};
232 345
346static struct resource pcie_io_space;
347static struct resource pcie_mem_space;
348static struct resource pcie_prefetch_mem_space;
349/* disable read/write while a noirq operation
350 * is performed, since pcie is powered off */
351static bool is_pcie_noirq_op = false;
352/* used to backup config space registers of all pcie devices */
353static u32 *pbackup_config_space = NULL;
354
233void __iomem *tegra_pcie_io_base; 355void __iomem *tegra_pcie_io_base;
234EXPORT_SYMBOL(tegra_pcie_io_base); 356EXPORT_SYMBOL(tegra_pcie_io_base);
235 357
@@ -253,6 +375,20 @@ static inline u32 pads_readl(unsigned long offset)
253 return readl(offset + PADS_OFFSET + tegra_pcie.regs); 375 return readl(offset + PADS_OFFSET + tegra_pcie.regs);
254} 376}
255 377
378static inline void rp_writel(u32 value, unsigned long offset, int rp)
379{
380 BUG_ON(rp != 0 && rp != 1 && rp != 2);
381 offset += rp * (0x1UL << (rp - 1)) * RP_OFFSET;
382 writel(value, offset + tegra_pcie.regs);
383}
384
385static inline unsigned int rp_readl(unsigned long offset, int rp)
386{
387 BUG_ON(rp != 0 && rp != 1 && rp != 2);
388 offset += rp * (0x1UL << (rp - 1)) * RP_OFFSET;
389 return readl(offset + tegra_pcie.regs);
390}
391
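
For reference, the offset arithmetic in rp_readl()/rp_writel() resolves to the following per-port bases with RP_OFFSET = 0x1000 (the rp == 0 case is simply zero because the whole product is multiplied by rp):

    rp 0:  base 0x0000
    rp 1:  1 * (1 << 0) * 0x1000 = 0x1000
    rp 2:  2 * (1 << 1) * 0x1000 = 0x4000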
256static struct tegra_pcie_port *bus_to_port(int bus) 392static struct tegra_pcie_port *bus_to_port(int bus)
257{ 393{
258 int i; 394 int i;
@@ -272,6 +408,10 @@ static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
272 struct tegra_pcie_port *pp = bus_to_port(bus->number); 408 struct tegra_pcie_port *pp = bus_to_port(bus->number);
273 void __iomem *addr; 409 void __iomem *addr;
274 410
 411	/* register reads are disabled while irqs are off, to avoid a hang in suspend noirq */
412 if (is_pcie_noirq_op)
413 return 0;
414
275 if (pp) { 415 if (pp) {
276 if (devfn != 0) { 416 if (devfn != 0) {
277 *val = 0xffffffff; 417 *val = 0xffffffff;
@@ -305,6 +445,23 @@ static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
305 u32 mask; 445 u32 mask;
306 u32 tmp; 446 u32 tmp;
307 447
 448	/* register writes are disabled while irqs are off, to avoid a hang in resume noirq */
449 if (is_pcie_noirq_op)
450 return 0;
 451	/* the pcie core is supposed to enable bus mastering and io/mem responses;
 452	 * if it has not, enable the corresponding bits in pci_command
453 */
454 if (where == PCI_COMMAND) {
455 if (!(val & PCI_COMMAND_IO))
456 val |= PCI_COMMAND_IO;
457 if (!(val & PCI_COMMAND_MEMORY))
458 val |= PCI_COMMAND_MEMORY;
459 if (!(val & PCI_COMMAND_MASTER))
460 val |= PCI_COMMAND_MASTER;
461 if (!(val & PCI_COMMAND_SERR))
462 val |= PCI_COMMAND_SERR;
463 }
464
308 if (pp) { 465 if (pp) {
309 if (devfn != 0) 466 if (devfn != 0)
310 return PCIBIOS_DEVICE_NOT_FOUND; 467 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -359,8 +516,14 @@ static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
359{ 516{
360 dev->class = PCI_CLASS_BRIDGE_PCI << 8; 517 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
361} 518}
519
520#ifdef CONFIG_ARCH_TEGRA_2x_SOC
362DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); 521DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
363DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); 522DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
523#else
524DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
525DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
526#endif
364 527
365/* Tegra PCIE requires relaxed ordering */ 528/* Tegra PCIE requires relaxed ordering */
366static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev) 529static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
@@ -379,6 +542,32 @@ static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
379} 542}
380DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); 543DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
381 544
545static void __init tegra_pcie_preinit(void)
546{
547 pcie_io_space.name = "PCIe I/O Space";
548 pcie_io_space.start = PCIBIOS_MIN_IO;
549 pcie_io_space.end = IO_SPACE_LIMIT;
550 pcie_io_space.flags = IORESOURCE_IO;
551 if (request_resource(&ioport_resource, &pcie_io_space))
552 panic("can't allocate PCIe I/O space");
553
554 pcie_mem_space.name = "PCIe MEM Space";
555 pcie_mem_space.start = MEM_BASE_0;
556 pcie_mem_space.end = MEM_BASE_0 + MEM_SIZE - 1;
557 pcie_mem_space.flags = IORESOURCE_MEM;
558 if (request_resource(&iomem_resource, &pcie_mem_space))
559 panic("can't allocate PCIe MEM space");
560
561 pcie_prefetch_mem_space.name = "PCIe PREFETCH MEM Space";
562 pcie_prefetch_mem_space.start = PREFETCH_MEM_BASE_0;
563 pcie_prefetch_mem_space.end = PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE
564 - 1;
565 pcie_prefetch_mem_space.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
566 if (request_resource(&iomem_resource, &pcie_prefetch_mem_space))
567 panic("can't allocate PCIe PREFETCH MEM space");
568
569}
570
382static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) 571static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
383{ 572{
384 struct tegra_pcie_port *pp; 573 struct tegra_pcie_port *pp;
@@ -389,72 +578,19 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
389 pp = tegra_pcie.port + nr; 578 pp = tegra_pcie.port + nr;
390 pp->root_bus_nr = sys->busnr; 579 pp->root_bus_nr = sys->busnr;
391 580
392 /* 581 sys->resource[0] = &pcie_io_space;
393 * IORESOURCE_IO 582 sys->resource[1] = &pcie_mem_space;
394 */ 583 sys->resource[2] = &pcie_prefetch_mem_space;
395 snprintf(pp->io_space_name, sizeof(pp->io_space_name),
396 "PCIe %d I/O", pp->index);
397 pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
398 pp->res[0].name = pp->io_space_name;
399 if (pp->index == 0) {
400 pp->res[0].start = PCIBIOS_MIN_IO;
401 pp->res[0].end = pp->res[0].start + SZ_32K - 1;
402 } else {
403 pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K;
404 pp->res[0].end = IO_SPACE_LIMIT;
405 }
406 pp->res[0].flags = IORESOURCE_IO;
407 if (request_resource(&ioport_resource, &pp->res[0]))
408 panic("Request PCIe IO resource failed\n");
409 sys->resource[0] = &pp->res[0];
410
411 /*
412 * IORESOURCE_MEM
413 */
414 snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
415 "PCIe %d MEM", pp->index);
416 pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
417 pp->res[1].name = pp->mem_space_name;
418 if (pp->index == 0) {
419 pp->res[1].start = MEM_BASE_0;
420 pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
421 } else {
422 pp->res[1].start = MEM_BASE_1;
423 pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
424 }
425 pp->res[1].flags = IORESOURCE_MEM;
426 if (request_resource(&iomem_resource, &pp->res[1]))
427 panic("Request PCIe Memory resource failed\n");
428 sys->resource[1] = &pp->res[1];
429
430 /*
431 * IORESOURCE_MEM | IORESOURCE_PREFETCH
432 */
433 snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name),
434 "PCIe %d PREFETCH MEM", pp->index);
435 pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
436 pp->res[2].name = pp->prefetch_space_name;
437 if (pp->index == 0) {
438 pp->res[2].start = PREFETCH_MEM_BASE_0;
439 pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
440 } else {
441 pp->res[2].start = PREFETCH_MEM_BASE_1;
442 pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
443 }
444 pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
445 if (request_resource(&iomem_resource, &pp->res[2]))
446 panic("Request PCIe Prefetch Memory resource failed\n");
447 sys->resource[2] = &pp->res[2];
448 584
449 return 1; 585 return 1;
450} 586}
451 587
452static int tegra_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin) 588static int tegra_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
453{ 589{
454 return INT_PCIE_INTR; 590 return INT_PCIE_INTR;
455} 591}
456 592
457static struct pci_bus __init *tegra_pcie_scan_bus(int nr, 593static struct pci_bus *tegra_pcie_scan_bus(int nr,
458 struct pci_sys_data *sys) 594 struct pci_sys_data *sys)
459{ 595{
460 struct tegra_pcie_port *pp; 596 struct tegra_pcie_port *pp;
@@ -468,8 +604,9 @@ static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
468 return pci_scan_bus(sys->busnr, &tegra_pcie_ops, sys); 604 return pci_scan_bus(sys->busnr, &tegra_pcie_ops, sys);
469} 605}
470 606
471static struct hw_pci tegra_pcie_hw __initdata = { 607static struct hw_pci tegra_pcie_hw = {
472 .nr_controllers = 2, 608 .nr_controllers = MAX_PCIE_SUPPORTED_PORTS,
609 .preinit = tegra_pcie_preinit,
473 .setup = tegra_pcie_setup, 610 .setup = tegra_pcie_setup,
474 .scan = tegra_pcie_scan_bus, 611 .scan = tegra_pcie_scan_bus,
475 .swizzle = pci_std_swizzle, 612 .swizzle = pci_std_swizzle,
@@ -486,6 +623,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
486 "Target abort", 623 "Target abort",
487 "Master abort", 624 "Master abort",
488 "Invalid write", 625 "Invalid write",
 626		"",
489 "Response decoding error", 627 "Response decoding error",
490 "AXI response decoding error", 628 "AXI response decoding error",
491 "Transcation timeout", 629 "Transcation timeout",
@@ -508,13 +646,17 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
508 * happen a lot during enumeration 646 * happen a lot during enumeration
509 */ 647 */
510 if (code == AFI_INTR_MASTER_ABORT) 648 if (code == AFI_INTR_MASTER_ABORT)
511 pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature); 649 pr_debug("PCIE: %s, signature: %08x\n",
650 err_msg[code], signature);
512 else 651 else
513 pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature); 652 pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);
514 653
515 return IRQ_HANDLED; 654 return IRQ_HANDLED;
516} 655}
517 656
657/*
658 * PCIe support functions
659 */
518static void tegra_pcie_setup_translations(void) 660static void tegra_pcie_setup_translations(void)
519{ 661{
520 u32 fpci_bar; 662 u32 fpci_bar;
@@ -547,7 +689,7 @@ static void tegra_pcie_setup_translations(void)
547 689
548 /* Bar 3: prefetchable memory BAR */ 690 /* Bar 3: prefetchable memory BAR */
549 fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1; 691 fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
550 size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1; 692 size = PREFETCH_MEM_SIZE;
551 axi_address = PREFETCH_MEM_BASE_0; 693 axi_address = PREFETCH_MEM_BASE_0;
552 afi_writel(axi_address, AFI_AXI_BAR3_START); 694 afi_writel(axi_address, AFI_AXI_BAR3_START);
553 afi_writel(size >> 12, AFI_AXI_BAR3_SZ); 695 afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
@@ -555,7 +697,7 @@ static void tegra_pcie_setup_translations(void)
555 697
556 /* Bar 4: non prefetchable memory BAR */ 698 /* Bar 4: non prefetchable memory BAR */
557 fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1; 699 fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1;
558 size = MEM_SIZE_0 + MEM_SIZE_1; 700 size = MEM_SIZE;
559 axi_address = MEM_BASE_0; 701 axi_address = MEM_BASE_0;
560 afi_writel(axi_address, AFI_AXI_BAR4_START); 702 afi_writel(axi_address, AFI_AXI_BAR4_START);
561 afi_writel(size >> 12, AFI_AXI_BAR4_SZ); 703 afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
@@ -586,10 +728,21 @@ static void tegra_pcie_enable_controller(void)
586{ 728{
587 u32 val, reg; 729 u32 val, reg;
588 int i; 730 int i;
731 void __iomem *reg_apb_misc_base;
732 void __iomem *reg_mselect_base;
733 reg_apb_misc_base = IO_ADDRESS(TEGRA_APB_MISC_BASE);
734 reg_mselect_base = IO_ADDRESS(TEGRA_MSELECT_BASE);
735
736 /* select the PCIE APERTURE in MSELECT config */
737 reg = readl(reg_mselect_base);
738 reg |= 1 << MSELECT_CONFIG_0_ENABLE_PCIE_APERTURE;
739 writel(reg, reg_mselect_base);
589 740
590 /* Enable slot clock and pulse the reset signals */ 741 /* Enable slot clock and pulse the reset signals */
591 for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) { 742 for (i = 0, reg = AFI_PEX0_CTRL; i < MAX_PCIE_SUPPORTED_PORTS;
592 val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN; 743 i++, reg += (i*8)) {
744 val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN |
745 (1 << AFI_PEX0_CTRL_0_PEX0_CLKREQ_EN);
593 afi_writel(val, reg); 746 afi_writel(val, reg);
594 val &= ~AFI_PEX_CTRL_RST; 747 val &= ~AFI_PEX_CTRL_RST;
595 afi_writel(val, reg); 748 afi_writel(val, reg);
@@ -597,13 +750,19 @@ static void tegra_pcie_enable_controller(void)
597 val = afi_readl(reg) | AFI_PEX_CTRL_RST; 750 val = afi_readl(reg) | AFI_PEX_CTRL_RST;
598 afi_writel(val, reg); 751 afi_writel(val, reg);
599 } 752 }
753 afi_writel(0, AFI_PEXBIAS_CTRL_0);
600 754
601 /* Enable dual controller and both ports */ 755 /* Enable dual controller and both ports */
602 val = afi_readl(AFI_PCIE_CONFIG); 756 val = afi_readl(AFI_PCIE_CONFIG);
603 val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE | 757 val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
604 AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE | 758 AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
759 AFI_PCIE_CONFIG_PCIEC2_DISABLE_DEVICE |
605 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK); 760 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
761#ifdef CONFIG_ARCH_TEGRA_2x_SOC
606 val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; 762 val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
763#else
764 val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
765#endif
607 afi_writel(val, AFI_PCIE_CONFIG); 766 afi_writel(val, AFI_PCIE_CONFIG);
608 767
609 val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS; 768 val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
@@ -622,7 +781,12 @@ static void tegra_pcie_enable_controller(void)
622 */ 781 */
623 val = pads_readl(PADS_PLL_CTL); 782 val = pads_readl(PADS_PLL_CTL);
624 val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK); 783 val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
784#ifdef CONFIG_ARCH_TEGRA_2x_SOC
625 val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10); 785 val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
786#else
787 val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML |
788 PADS_PLL_CTL_TXCLKREF_BUF_EN);
789#endif
626 pads_writel(val, PADS_PLL_CTL); 790 pads_writel(val, PADS_PLL_CTL);
627 791
628 /* take PLL out of reset */ 792 /* take PLL out of reset */
@@ -634,6 +798,7 @@ static void tegra_pcie_enable_controller(void)
634 * This doesn't exist in the documentation 798 * This doesn't exist in the documentation
635 */ 799 */
636 pads_writel(0xfa5cfa5c, 0xc8); 800 pads_writel(0xfa5cfa5c, 0xc8);
801 pads_writel(0x0000FA5C, NV_PCIE2_PADS_REFCLK_CFG1);
637 802
638 /* Wait for the PLL to lock */ 803 /* Wait for the PLL to lock */
639 do { 804 do {
@@ -653,12 +818,18 @@ static void tegra_pcie_enable_controller(void)
653 tegra_periph_reset_deassert(tegra_pcie.pcie_xclk); 818 tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);
654 819
655 /* Finally enable PCIe */ 820 /* Finally enable PCIe */
656 val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI; 821 val = afi_readl(AFI_CONFIGURATION);
822 val = val | AFI_CONFIGURATION_EN_FPCI;
823
824 /* Enable DFPCI_RSPPASSPW */
825 val |= 4;
826
657 afi_writel(val, AFI_CONFIGURATION); 827 afi_writel(val, AFI_CONFIGURATION);
658 828
659 val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | 829 val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
660 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR | 830 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
661 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR); 831 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR |
832 AFI_INTR_EN_PRSNT_SENSE);
662 afi_writel(val, AFI_AFI_INTR_ENABLE); 833 afi_writel(val, AFI_AFI_INTR_ENABLE);
663 afi_writel(0xffffffff, AFI_SM_INTR_ENABLE); 834 afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);
664 835
@@ -671,105 +842,168 @@ static void tegra_pcie_enable_controller(void)
671 return; 842 return;
672} 843}
673 844
674static void tegra_pcie_xclk_clamp(bool clamp) 845static int tegra_pci_enable_regulators(void)
675{ 846{
676 u32 reg; 847 if (tegra_pcie.power_rails_enabled)
848 return 0;
849 if (tegra_pcie.regulator_hvdd == NULL) {
850 printk(KERN_INFO "PCIE.C: %s : regulator hvdd_pex\n",
851 __func__);
852 tegra_pcie.regulator_hvdd =
853 regulator_get(NULL, "hvdd_pex");
854 if (IS_ERR_OR_NULL(tegra_pcie.regulator_hvdd)) {
855 pr_err("%s: unable to get hvdd_pex regulator\n",
856 __func__);
857 tegra_pcie.regulator_hvdd = 0;
858 }
859 }
860
861 if (tegra_pcie.regulator_pexio == NULL) {
862 printk(KERN_INFO "PCIE.C: %s : regulator pexio\n", __func__);
863 tegra_pcie.regulator_pexio =
864 regulator_get(NULL, "vdd_pexb");
865 if (IS_ERR_OR_NULL(tegra_pcie.regulator_pexio)) {
866 pr_err("%s: unable to get pexio regulator\n", __func__);
867 tegra_pcie.regulator_pexio = 0;
868 }
869 }
677 870
 678	reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP;	 871	/* SATA and PCIE use the same PLLE; in the default configuration
 872	 * AVDD_PLLE is set up together with SATA,
 873	 * so if you use the default board, you have to turn on (LDO2) AVDD_PLLE.
874 */
875 if (tegra_pcie.regulator_avdd_plle == NULL) {
876 printk(KERN_INFO "PCIE.C: %s : regulator avdd_plle\n",
877 __func__);
878 tegra_pcie.regulator_avdd_plle = regulator_get(NULL,
879 "avdd_plle");
880 if (IS_ERR_OR_NULL(tegra_pcie.regulator_avdd_plle)) {
881 pr_err("%s: unable to get avdd_plle regulator\n",
882 __func__);
883 tegra_pcie.regulator_avdd_plle = 0;
884 }
885 }
886 if (tegra_pcie.regulator_hvdd)
887 regulator_enable(tegra_pcie.regulator_hvdd);
888 if (tegra_pcie.regulator_pexio)
889 regulator_enable(tegra_pcie.regulator_pexio);
890 if (tegra_pcie.regulator_avdd_plle)
891 regulator_enable(tegra_pcie.regulator_avdd_plle);
679 892
680 if (clamp) 893 tegra_pcie.power_rails_enabled = 1;
681 reg |= PMC_SCRATCH42_PCX_CLAMP;
682 894
683 pmc_writel(reg, PMC_SCRATCH42); 895 return 0;
684} 896}
685 897
686static void tegra_pcie_power_off(void) 898static int tegra_pci_disable_regulators(void)
687{ 899{
688 tegra_periph_reset_assert(tegra_pcie.pcie_xclk); 900 int err = 0;
689 tegra_periph_reset_assert(tegra_pcie.afi_clk); 901 if (tegra_pcie.power_rails_enabled == 0)
690 tegra_periph_reset_assert(tegra_pcie.pex_clk); 902 goto err_exit;
903 if (tegra_pcie.regulator_hvdd)
904 err = regulator_disable(tegra_pcie.regulator_hvdd);
905 if (err)
906 goto err_exit;
907 if (tegra_pcie.regulator_pexio)
908 err = regulator_disable(tegra_pcie.regulator_pexio);
909 if (err)
910 goto err_exit;
911 if (tegra_pcie.regulator_avdd_plle)
912 err = regulator_disable(tegra_pcie.regulator_avdd_plle);
913 tegra_pcie.power_rails_enabled = 0;
914err_exit:
915 return err;
916}
691 917
692 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); 918static int tegra_pcie_power_on(void)
693 tegra_pcie_xclk_clamp(true); 919{
920 int err = 0;
921 if (tegra_pcie.pcie_power_enabled)
922 return 0;
923 err = tegra_pci_enable_regulators();
924 if (err)
925 goto err_exit;
926 err = tegra_unpowergate_partition_with_clk_on(TEGRA_POWERGATE_PCIE);
927 if (err)
928 goto err_exit;
929 if (tegra_pcie.pll_e)
930 clk_enable(tegra_pcie.pll_e);
931
932 tegra_pcie.pcie_power_enabled = 1;
933err_exit:
934 return err;
694} 935}
695 936
696static int tegra_pcie_power_regate(void) 937static int tegra_pcie_power_off(void)
697{ 938{
698 int err; 939 int err = 0;
940 if (tegra_pcie.pcie_power_enabled == 0)
941 return 0;
942 if (tegra_pcie.pll_e)
943 clk_disable(tegra_pcie.pll_e);
699 944
700 tegra_pcie_power_off(); 945 err = tegra_powergate_partition_with_clk_off(TEGRA_POWERGATE_PCIE);
946 if (err)
947 goto err_exit;
701 948
702 tegra_pcie_xclk_clamp(true); 949 err = tegra_pci_disable_regulators();
703 950
704 tegra_periph_reset_assert(tegra_pcie.pcie_xclk); 951 tegra_pcie.pcie_power_enabled = 0;
705 tegra_periph_reset_assert(tegra_pcie.afi_clk); 952err_exit:
953 return err;
954}
706 955
707 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, 956static int tegra_pcie_power_regate(void)
708 tegra_pcie.pex_clk); 957{
958 int err;
959 err = tegra_unpowergate_partition_with_clk_on(TEGRA_POWERGATE_PCIE);
709 if (err) { 960 if (err) {
710 pr_err("PCIE: powerup sequence failed: %d\n", err); 961 pr_err("PCIE: powerup sequence failed: %d\n", err);
711 return err; 962 return err;
712 } 963 }
713 964 tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
714 tegra_periph_reset_deassert(tegra_pcie.afi_clk);
715
716 tegra_pcie_xclk_clamp(false);
717
718 clk_enable(tegra_pcie.afi_clk);
719 clk_enable(tegra_pcie.pex_clk);
720 return clk_enable(tegra_pcie.pll_e); 965 return clk_enable(tegra_pcie.pll_e);
721} 966}
722 967
723static int tegra_pcie_clocks_get(void) 968static int tegra_pcie_clocks_get(void)
724{ 969{
725 int err; 970 /* reset the PCIEXCLK */
726 971 tegra_pcie.pcie_xclk = clk_get_sys("tegra_pcie", "pciex");
727 tegra_pcie.pex_clk = clk_get(NULL, "pex"); 972 if (IS_ERR_OR_NULL(tegra_pcie.pcie_xclk)) {
728 if (IS_ERR(tegra_pcie.pex_clk)) 973 pr_err("%s: unable to get PCIE Xclock\n", __func__);
729 return PTR_ERR(tegra_pcie.pex_clk); 974 goto error_exit;
730
731 tegra_pcie.afi_clk = clk_get(NULL, "afi");
732 if (IS_ERR(tegra_pcie.afi_clk)) {
733 err = PTR_ERR(tegra_pcie.afi_clk);
734 goto err_afi_clk;
735 } 975 }
736
737 tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
738 if (IS_ERR(tegra_pcie.pcie_xclk)) {
739 err = PTR_ERR(tegra_pcie.pcie_xclk);
740 goto err_pcie_xclk;
741 }
742
743 tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e"); 976 tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
744 if (IS_ERR(tegra_pcie.pll_e)) { 977 if (IS_ERR_OR_NULL(tegra_pcie.pll_e)) {
745 err = PTR_ERR(tegra_pcie.pll_e); 978 pr_err("%s: unable to get PLLE\n", __func__);
746 goto err_pll_e; 979 goto error_exit;
747 } 980 }
748
749 return 0; 981 return 0;
750 982error_exit:
751err_pll_e: 983 if (tegra_pcie.pcie_xclk)
752 clk_put(tegra_pcie.pcie_xclk); 984 clk_put(tegra_pcie.pcie_xclk);
753err_pcie_xclk: 985 if (tegra_pcie.pll_e)
754 clk_put(tegra_pcie.afi_clk); 986 clk_put(tegra_pcie.pll_e);
755err_afi_clk: 987 return -EINVAL;
756 clk_put(tegra_pcie.pex_clk);
757
758 return err;
759} 988}
760 989
761static void tegra_pcie_clocks_put(void) 990static void tegra_pcie_clocks_put(void)
762{ 991{
763 clk_put(tegra_pcie.pll_e); 992 clk_put(tegra_pcie.pll_e);
764 clk_put(tegra_pcie.pcie_xclk); 993 clk_put(tegra_pcie.pcie_xclk);
765 clk_put(tegra_pcie.afi_clk);
766 clk_put(tegra_pcie.pex_clk);
767} 994}
768 995
769static int __init tegra_pcie_get_resources(void) 996static int __init tegra_pcie_get_resources(void)
770{ 997{
771 struct resource *res_mmio = &tegra_pcie.res_mmio; 998 struct resource *res_mmio = 0;
772 int err; 999 int err;
1000 tegra_pcie.power_rails_enabled = 0;
1001 err = tegra_pci_enable_regulators();
1002 if (err) {
1003 pr_err("PCIE: failed to enable power rails %d\n", err);
1004 goto err_pwr_on_rail;
1005 }
1006 tegra_unpowergate_partition(TEGRA_POWERGATE_PCIE);
773 1007
774 err = tegra_pcie_clocks_get(); 1008 err = tegra_pcie_clocks_get();
775 if (err) { 1009 if (err) {
@@ -789,6 +1023,7 @@ static int __init tegra_pcie_get_resources(void)
789 err = -ENOMEM; 1023 err = -ENOMEM;
790 goto err_map_reg; 1024 goto err_map_reg;
791 } 1025 }
1026 res_mmio = &tegra_pcie.res_mmio;
792 1027
793 err = request_resource(&iomem_resource, res_mmio); 1028 err = request_resource(&iomem_resource, res_mmio);
794 if (err) { 1029 if (err) {
@@ -824,7 +1059,8 @@ err_map_reg:
824 tegra_pcie_power_off(); 1059 tegra_pcie_power_off();
825err_pwr_on: 1060err_pwr_on:
826 tegra_pcie_clocks_put(); 1061 tegra_pcie_clocks_put();
827 1062err_pwr_on_rail:
1063 tegra_pci_disable_regulators();
828 return err; 1064 return err;
829} 1065}
830 1066
@@ -871,11 +1107,10 @@ static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
871 1107
872retry: 1108retry:
873 /* Pulse the PEX reset */ 1109 /* Pulse the PEX reset */
874 reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
875 afi_writel(reg, reset_reg);
876 mdelay(1);
877 reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST; 1110 reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
878 afi_writel(reg, reset_reg); 1111 afi_writel(reg, reset_reg);
1112 reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
1113 afi_writel(reg, reset_reg);
879 1114
880 retries--; 1115 retries--;
881 } while (retries); 1116 } while (retries);
@@ -886,6 +1121,7 @@ retry:
886static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg) 1121static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
887{ 1122{
888 struct tegra_pcie_port *pp; 1123 struct tegra_pcie_port *pp;
1124 unsigned int data;
889 1125
890 pp = tegra_pcie.port + tegra_pcie.num_ports; 1126 pp = tegra_pcie.port + tegra_pcie.num_ports;
891 1127
@@ -898,6 +1134,18 @@ static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
898 printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index); 1134 printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index);
899 return; 1135 return;
900 } 1136 }
 1137	/* Power management settings */
1138 /* Enable clock clamping by default */
1139 data = rp_readl(NV_PCIE2_RP_PRIV_MISC, index);
1140 data |= (PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE) |
1141 (PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE);
1142 rp_writel(data, NV_PCIE2_RP_PRIV_MISC, index);
1143
1144 /* Initialize TXBA1 register */
1145 data = rp_readl(NV_PCIE2_RP_TXBA1, index);
1146 data &= 0xffffff00;
1147 data |= 0x44;
1148 rp_writel(data, NV_PCIE2_RP_TXBA1, index);
901 1149
902 tegra_pcie.num_ports++; 1150 tegra_pcie.num_ports++;
903 pp->index = index; 1151 pp->index = index;
@@ -905,29 +1153,388 @@ static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
905 memset(pp->res, 0, sizeof(pp->res)); 1153 memset(pp->res, 0, sizeof(pp->res));
906} 1154}
907 1155
908int __init tegra_pcie_init(bool init_port0, bool init_port1) 1156static int tegra_pcie_init(void)
909{ 1157{
910 int err; 1158 int err = 0;
911 1159 int port;
912 if (!(init_port0 || init_port1)) 1160 int rp_offset = 0;
913 return -ENODEV; 1161 int ctrl_offset = AFI_PEX0_CTRL;
914 1162
1163#ifdef CONFIG_ARCH_TEGRA_2x_SOC
1164 pcibios_min_mem = 0x1000;
1165 pcibios_min_io = 0;
1166#else
1167 pcibios_min_mem = 0x03000000ul;
1168 pcibios_min_io = 0x10000000ul;
1169#endif
915 err = tegra_pcie_get_resources(); 1170 err = tegra_pcie_get_resources();
916 if (err) 1171 if (err)
917 return err; 1172 return err;
918
919 tegra_pcie_enable_controller(); 1173 tegra_pcie_enable_controller();
920 1174
921 /* setup the AFI address translations */ 1175 /* setup the AFI address translations */
922 tegra_pcie_setup_translations(); 1176 tegra_pcie_setup_translations();
1177 for (port = 0; port < MAX_PCIE_SUPPORTED_PORTS; port++) {
1178 ctrl_offset += (port * 8);
1179 rp_offset = (rp_offset + 0x1000) * port;
1180 if (tegra_pcie.plat_data->port_status[port])
1181 tegra_pcie_add_port(port, rp_offset, ctrl_offset);
1182 }
923 1183
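
Walking through this loop with the values defined earlier, as a worked example: port 0 uses ctrl_offset 0x110 (AFI_PEX0_CTRL) and rp_offset 0x0000; port 1 adds 8 to get 0x118 (AFI_PEX1_CTRL) with rp_offset (0 + 0x1000) * 1 = 0x1000; port 2 adds 16 to get 0x128 (AFI_PEX2_CTRL) with rp_offset (0x1000 + 0x1000) * 2 = 0x4000. These match the per-port bases computed by rp_readl()/rp_writel() above.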
924 if (init_port0) 1184 tegra_pcie.pcie_power_enabled = 1;
925 tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL); 1185 if (tegra_pcie.num_ports)
1186 pci_common_init(&tegra_pcie_hw);
1187 else
1188 err = tegra_pcie_power_off();
926 1189
927 if (init_port1) 1190 return err;
928 tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL); 1191}
929 1192
930 pci_common_init(&tegra_pcie_hw); 1193static int tegra_pci_probe(struct platform_device *pdev)
1194{
1195 int ret;
1196 struct pci_dev *dev = NULL;
1197
1198 tegra_pcie.plat_data = pdev->dev.platform_data;
1199 dev_dbg(&pdev->dev, "PCIE.C: %s : _port_status[0] %d\n",
1200 __func__, tegra_pcie.plat_data->port_status[0]);
1201 dev_dbg(&pdev->dev, "PCIE.C: %s : _port_status[1] %d\n",
1202 __func__, tegra_pcie.plat_data->port_status[1]);
1203 dev_dbg(&pdev->dev, "PCIE.C: %s : _port_status[2] %d\n",
1204 __func__, tegra_pcie.plat_data->port_status[2]);
1205 ret = tegra_pcie_init();
1206
1207 /* disable async PM of pci devices to ensure right order */
1208 /* suspend/resume calls of tegra and bus driver */
1209 for_each_pci_dev(dev)
1210 device_disable_async_suspend(&dev->dev);
1211
1212 return ret;
1213}
1214
1215static int tegra_pci_suspend(struct device *dev)
1216{
1217 struct pci_dev *pdev = NULL;
1218 int i, size, ndev = 0;
1219
1220 for_each_pci_dev(pdev) {
1221 /* save state of pcie devices before powering off regulators */
1222 pci_save_state(pdev);
1223 size = sizeof(pdev->saved_config_space) / sizeof(u32);
1224 ndev++;
1225 }
931 1226
 1227	/* back up config space registers of all devices, since the saved state gets
 1228	   reset by the save state call from suspend noirq (config reads are disabled there) */
 1229	pbackup_config_space = kzalloc(ndev * size * sizeof(u32), GFP_KERNEL);
1230 if (!pbackup_config_space)
1231 return -ENODEV;
1232 ndev = 0;
1233 for_each_pci_dev(pdev) {
 1234		for (i = 0; i < size; i++) {
1235 memcpy(&pbackup_config_space[i + size*ndev],
1236 &pdev->saved_config_space[i], sizeof(u32));
1237 }
1238 ndev++;
1239 }
1240
1241 /* disable read/write registers before powering off */
1242 is_pcie_noirq_op = true;
1243
1244 return tegra_pcie_power_off();
1245}
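
The backup buffer used above is a flat array: each device contributes `size` u32 words, and word i of the n-th device lives at pbackup_config_space[i + size * n]. A small user-space sketch of that indexing (the device count is hypothetical; 16 words per device matches sizeof(saved_config_space) / sizeof(u32) in struct pci_dev):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef unsigned int u32;

    int main(void)
    {
        const int size = 16;    /* config words per device */
        const int ndev = 3;     /* hypothetical number of PCI devices */
        u32 saved[3][16];       /* stands in for each pdev->saved_config_space */
        u32 *backup = calloc(ndev * size, sizeof(u32));
        int n, i;

        for (n = 0; n < ndev; n++)
            for (i = 0; i < size; i++)
                saved[n][i] = (n << 16) | i;    /* dummy contents */

        /* copy out with the same indexing tegra_pci_suspend() uses */
        for (n = 0; n < ndev; n++)
            for (i = 0; i < size; i++)
                memcpy(&backup[i + size * n], &saved[n][i], sizeof(u32));

        printf("device 2, word 5 = 0x%08x\n", backup[5 + size * 2]);  /* 0x00020005 */
        free(backup);
        return 0;
    }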
1246static int tegra_pci_resume_noirq(struct device *dev)
1247{
1248 struct pci_dev *pdev = NULL;
1249
1250 for_each_pci_dev(pdev) {
1251 /* set this flag to avoid restore state in resume noirq */
1252 pdev->state_saved = 0;
1253 }
932 return 0; 1254 return 0;
933} 1255}
1256
1257static int tegra_pci_resume(struct device *dev)
1258{
1259 int ret;
1260 int i, size, ndev = 0;
1261 struct pci_dev *pdev = NULL;
1262
1263 ret = tegra_pcie_power_on();
1264 tegra_pcie_enable_controller();
1265 tegra_pcie_setup_translations();
1266
1267 /* enable read/write registers after powering on */
1268 is_pcie_noirq_op = false;
1269
1270 for_each_pci_dev(pdev) {
 1271		/* do the fixup here for all devices since it is not done in resume noirq */
1272 pci_fixup_device(pci_fixup_resume_early, pdev);
1273
1274 /* set this flag to force restore state in resume */
1275 pdev->state_saved = 1;
1276
1277 /* restore config space registers from backup buffer */
1278 size = sizeof(pdev->saved_config_space) / sizeof(u32);
 1279		for (i = 0; i < size; i++) {
1280 memcpy(&pdev->saved_config_space[i],
1281 &pbackup_config_space[i + size*ndev], sizeof(u32));
1282 }
1283 ndev++;
1284 }
1285 kzfree(pbackup_config_space);
1286
1287 return ret;
1288}
1289
1290static int tegra_pci_remove(struct platform_device *pdev)
1291{
1292 return 0;
1293}
1294#ifdef CONFIG_PM
1295static const struct dev_pm_ops tegra_pci_pm_ops = {
1296 .suspend = tegra_pci_suspend,
1297 .resume = tegra_pci_resume,
1298 .resume_noirq = tegra_pci_resume_noirq,
1299 };
1300#endif
1301
1302static struct platform_driver tegra_pci_driver = {
1303 .probe = tegra_pci_probe,
1304 .remove = tegra_pci_remove,
1305 .driver = {
1306 .name = "tegra-pcie",
1307 .owner = THIS_MODULE,
1308#ifdef CONFIG_PM
1309 .pm = &tegra_pci_pm_ops,
1310#endif
1311 },
1312};
1313
1314static int __init tegra_pci_init_driver(void)
1315{
1316 return platform_driver_register(&tegra_pci_driver);
1317}
1318
1319static void __exit tegra_pci_exit_driver(void)
1320{
1321 platform_driver_unregister(&tegra_pci_driver);
1322}
1323
1324module_init(tegra_pci_init_driver);
1325module_exit(tegra_pci_exit_driver);
1326
1327static struct irq_chip tegra_irq_chip_msi_pcie = {
1328 .name = "PCIe-MSI",
1329 .irq_mask = mask_msi_irq,
1330 .irq_unmask = unmask_msi_irq,
1331 .irq_enable = unmask_msi_irq,
1332 .irq_disable = mask_msi_irq,
1333};
1334
1335/* 1:1 matching of these to the MSI vectors, 1 per bit */
1336/* and each mapping matches one of the available interrupts */
1337/* irq should equal INT_PCI_MSI_BASE + index */
1338struct msi_map_entry {
1339 bool used;
1340 u8 index;
1341 int irq;
1342};
1343
1344/* hardware supports 256 max*/
1345#if (INT_PCI_MSI_NR > 256)
1346#error "INT_PCI_MSI_NR too big"
1347#endif
1348
1349#define MSI_MAP_SIZE (INT_PCI_MSI_NR)
1350static struct msi_map_entry msi_map[MSI_MAP_SIZE];
1351
1352static void msi_map_init(void)
1353{
1354 int i;
1355
1356 for (i = 0; i < MSI_MAP_SIZE; i++) {
1357 msi_map[i].used = false;
1358 msi_map[i].index = i;
1359 msi_map[i].irq = 0;
1360 }
1361}
1362
1363/* returns an index into the map*/
1364static struct msi_map_entry *msi_map_get(void)
1365{
1366 struct msi_map_entry *retval = NULL;
1367 int i;
1368
1369 for (i = 0; i < MSI_MAP_SIZE; i++) {
1370 if (!msi_map[i].used) {
1371 retval = msi_map + i;
1372 retval->irq = INT_PCI_MSI_BASE + i;
1373 retval->used = true;
1374 break;
1375 }
1376 }
1377
1378 return retval;
1379}
1380
1381void msi_map_release(struct msi_map_entry *entry)
1382{
1383 if (entry) {
1384 entry->used = false;
1385 entry->irq = 0;
1386 }
1387}
1388
1389static irqreturn_t pci_tegra_msi_isr(int irq, void *arg)
1390{
1391 int i;
1392 int offset;
1393 int index;
1394 u32 reg;
1395
1396 for (i = 0; i < 8; i++) {
1397 reg = afi_readl(AFI_MSI_VEC0_0 + i * 4);
1398 while (reg != 0x00000000) {
1399 offset = find_first_bit((unsigned long int *)&reg, 32);
1400 index = i * 32 + offset;
1401 /* clear the interrupt */
 1402			afi_writel(1ul << offset, AFI_MSI_VEC0_0 + i * 4);
1403 if (index < MSI_MAP_SIZE) {
1404 if (msi_map[index].used)
1405 generic_handle_irq(msi_map[index].irq);
1406 else
1407 printk(KERN_INFO "unexpected MSI (1)\n");
1408 } else {
1409 /* that's weird who triggered this?*/
1410 /* just clear it*/
1411 printk(KERN_INFO "unexpected MSI (2)\n");
1412 }
1413 /* see if there's any more pending in this vector */
1414 reg = afi_readl(AFI_MSI_VEC0_0 + i * 4);
1415 }
1416 }
1417
1418 return IRQ_HANDLED;
1419}
1420
1421static bool pci_tegra_enable_msi(void)
1422{
1423 bool retval = false;
1424 static bool already_done;
1425 u32 reg;
1426 u32 msi_base = 0;
1427 u32 msi_aligned = 0;
1428
1429 /* enables MSI interrupts. */
1430 /* this only happens once. */
1431 if (already_done) {
1432 retval = true;
1433 goto exit;
1434 }
1435
1436 msi_map_init();
1437
1438 if (request_irq(INT_PCIE_MSI, pci_tegra_msi_isr,
1439 IRQF_SHARED, "PCIe-MSI",
1440 pci_tegra_msi_isr)) {
1441 pr_err("%s: Cannot register IRQ %u\n",
1442 __func__, INT_PCIE_MSI);
1443 goto exit;
1444 }
1445
1446 /* setup AFI/FPCI range */
1447 /* FIXME do this better! should be based on PAGE_SIZE */
1448 msi_base = __get_free_pages(GFP_KERNEL, 3);
1449 msi_aligned = ((msi_base + ((1<<12) - 1)) & ~((1<<12) - 1));
1450 msi_aligned = virt_to_phys((void *)msi_aligned);
1451
1452#ifdef CONFIG_ARCH_TEGRA_2x_SOC
1453 afi_writel(msi_aligned, AFI_MSI_FPCI_BAR_ST_0);
1454#else
1455 /* different from T20!*/
1456 afi_writel(msi_aligned>>8, AFI_MSI_FPCI_BAR_ST_0);
1457#endif
1458 afi_writel(msi_aligned, AFI_MSI_AXI_BAR_ST_0);
1459 /* this register is in 4K increments */
1460 afi_writel(1, AFI_MSI_BAR_SZ_0);
1461
1462 /* enable all MSI vectors */
1463 afi_writel(0xffffffff, AFI_MSI_EN_VEC0_0);
1464 afi_writel(0xffffffff, AFI_MSI_EN_VEC1_0);
1465 afi_writel(0xffffffff, AFI_MSI_EN_VEC2_0);
1466 afi_writel(0xffffffff, AFI_MSI_EN_VEC3_0);
1467 afi_writel(0xffffffff, AFI_MSI_EN_VEC4_0);
1468 afi_writel(0xffffffff, AFI_MSI_EN_VEC5_0);
1469 afi_writel(0xffffffff, AFI_MSI_EN_VEC6_0);
1470 afi_writel(0xffffffff, AFI_MSI_EN_VEC7_0);
1471
1472 /* and unmask the MSI interrupt */
1473 reg = 0;
1474 reg |= ((1 << AFI_INTR_MASK_0_INT_MASK) |
1475 (1 << AFI_INTR_MASK_0_MSI_MASK));
1476 afi_writel(reg, AFI_INTR_MASK_0);
1477
1478 set_irq_flags(INT_PCIE_MSI, IRQF_VALID);
1479
1480 already_done = true;
1481 retval = true;
1482exit:
1483 if (!retval) {
1484 if (msi_base)
1485 free_pages(msi_base, 3);
1486 }
1487 return retval;
1488}
1489
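
As a worked example of the alignment step above (with a hypothetical address): for msi_base = 0x87f01234, msi_aligned becomes (0x87f01234 + 0xfff) & ~0xfff = 0x87f02000. AFI_MSI_BAR_SZ_0 is then written with 1, i.e. a single 4KB window; that one window is enough for all 256 vectors because every MSI targets the same address and the vector is selected by msg.data (see arch_setup_msi_irq() below).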
1490
1491/* called by arch_setup_msi_irqs in drivers/pci/msi.c */
1492int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
1493{
1494 int retval = -EINVAL;
1495 struct msi_msg msg;
1496 struct msi_map_entry *map_entry = NULL;
1497
1498 if (!pci_tegra_enable_msi())
1499 goto exit;
1500
1501 map_entry = msi_map_get();
1502 if (map_entry == NULL)
1503 goto exit;
1504
1505 irq_alloc_desc(map_entry->irq);
1506 irq_set_chip_and_handler(map_entry->irq,
1507 &tegra_irq_chip_msi_pcie,
1508 handle_simple_irq);
1509
1510 irq_set_msi_desc(map_entry->irq, desc);
1511 set_irq_flags(map_entry->irq, IRQF_VALID);
1512
1513 msg.address_lo = afi_readl(AFI_MSI_AXI_BAR_ST_0);
1514 /* 32 bit address only */
1515 msg.address_hi = 0;
1516 msg.data = map_entry->index;
1517
1518 write_msi_msg(map_entry->irq, &msg);
1519
1520 retval = 0;
1521exit:
1522 if (retval != 0) {
1523 if (map_entry)
1524 msi_map_release(map_entry);
1525 }
1526
1527 return retval;
1528}
1529
1530void arch_teardown_msi_irq(unsigned int irq)
1531{
1532 int i;
1533 for (i = 0; i < MSI_MAP_SIZE; i++) {
1534 if ((msi_map[i].used) && (msi_map[i].irq == irq)) {
1535 irq_free_desc(msi_map[i].irq);
1536 msi_map_release(msi_map + i);
1537 break;
1538 }
1539 }
1540}