path: root/arch/mips
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                                       |    2
-rw-r--r--  arch/mips/cavium-octeon/Makefile                        |    4
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c                    |  311
-rw-r--r--  arch/mips/cavium-octeon/executive/Makefile              |    1
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-errata.c  |   70
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c    |  144
-rw-r--r--  arch/mips/cavium-octeon/msi.c                           |  288
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c                    |    2
-rw-r--r--  arch/mips/cavium-octeon/pci-common.c                    |  137
-rw-r--r--  arch/mips/cavium-octeon/pci-common.h                    |   39
-rw-r--r--  arch/mips/cavium-octeon/pci.c                           |  568
-rw-r--r--  arch/mips/cavium-octeon/pcie.c                          | 1370
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-errata.h       |   33
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-jtag.h         |   43
-rw-r--r--  arch/mips/include/asm/octeon/cvmx.h                     |   12
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h                   |    2
16 files changed, 3024 insertions, 2 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 25f3b0a11ca8..96f05e588f4c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -618,6 +618,8 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
618 select SYS_HAS_EARLY_PRINTK
619 select SYS_HAS_CPU_CAVIUM_OCTEON
620 select SWAP_IO_SPACE
621 select HW_HAS_PCI
622 select ARCH_SUPPORTS_MSI
623 help
624 This option supports all of the Octeon reference boards from Cavium
625 Networks. It builds a kernel that dynamically determines the Octeon
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index d6903c3f3d51..7c0528b0e34c 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -14,5 +14,9 @@ obj-y += dma-octeon.o flash_setup.o
14obj-y += octeon-memcpy.o
15
16obj-$(CONFIG_SMP) += smp.o
17obj-$(CONFIG_PCI) += pci-common.o
18obj-$(CONFIG_PCI) += pci.o
19obj-$(CONFIG_PCI) += pcie.o
20obj-$(CONFIG_PCI_MSI) += msi.o
21
22EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 01b1ef94b361..627c162a6159 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -13,20 +13,327 @@
13 */
14#include <linux/types.h>
15#include <linux/mm.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/dma-mapping.h>
19#include <linux/platform_device.h>
20#include <linux/scatterlist.h>
21
22#include <linux/cache.h>
23#include <linux/io.h>
24
25#include <asm/octeon/octeon.h>
26#include <asm/octeon/cvmx-npi-defs.h>
27#include <asm/octeon/cvmx-pci-defs.h>
28
29#include <dma-coherence.h>
30
31#ifdef CONFIG_PCI
32#include "pci-common.h"
33#endif
34
35#define BAR2_PCI_ADDRESS 0x8000000000ul
36
37struct bar1_index_state {
38 int16_t ref_count; /* Number of PCI mappings using this index */
39 uint16_t address_bits; /* Upper bits of physical address. This is
40 shifted 22 bits */
41};
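/*
 * Worked example (added for illustration, not in the original patch):
 * the low 22 bits of a physical address pass through a BAR1 window
 * unchanged, so each index covers a 4 MB window.  A buffer at physical
 * address 0x08000000 (128 MB) has address_bits = 0x08000000 >> 22 = 0x20,
 * and any other buffer in the same 4 MB window can reuse that BAR1
 * entry by bumping its ref_count.
 */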
42
43#ifdef CONFIG_PCI
44static DEFINE_SPINLOCK(bar1_lock);
45static struct bar1_index_state bar1_state[32];
46#endif
47
48dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
49{
50#ifndef CONFIG_PCI
51 /* Without PCI/PCIe this function can be called for Octeon internal
52 devices such as USB. These devices all support 64bit addressing */
53 mb();
54 return virt_to_phys(ptr);
55#else
56 unsigned long flags;
57 uint64_t dma_mask;
58 int64_t start_index;
59 dma_addr_t result = -1;
60 uint64_t physical = virt_to_phys(ptr);
61 int64_t index;
62
63 mb();
64 /*
65 * Use the DMA masks to determine the allowed memory
66 * region. For us it doesn't limit the actual memory, just the
67 * address visible over PCI. Devices with limits need to use
68 * lower indexed Bar1 entries.
69 */
70 if (dev) {
71 dma_mask = dev->coherent_dma_mask;
72 if (dev->dma_mask)
73 dma_mask = *dev->dma_mask;
74 } else {
75 dma_mask = 0xfffffffful;
76 }
77
78 /*
79 * Platform devices, such as the internal USB, skip all
80 * translation and use Octeon physical addresses directly.
81 */
82 if (!dev || dev->bus == &platform_bus_type)
83 return physical;
84
85 switch (octeon_dma_bar_type) {
86 case OCTEON_DMA_BAR_TYPE_PCIE:
87 if (unlikely(physical < (16ul << 10)))
88 panic("dma_map_single: Not allowed to map first 16KB."
89 " It interferes with BAR0 special area\n");
90 else if ((physical + size >= (256ul << 20)) &&
91 (physical < (512ul << 20)))
92 panic("dma_map_single: Not allowed to map bootbus\n");
93 else if ((physical + size >= 0x400000000ull) &&
94 physical < 0x410000000ull)
95 panic("dma_map_single: "
96 "Attempt to map illegal memory address 0x%llx\n",
97 physical);
98 else if (physical >= 0x420000000ull)
99 panic("dma_map_single: "
100 "Attempt to map illegal memory address 0x%llx\n",
101 physical);
102 else if ((physical + size >=
103 (4ull<<30) - (OCTEON_PCI_BAR1_HOLE_SIZE<<20))
104 && physical < (4ull<<30))
105 pr_warning("dma_map_single: Warning: "
106 "Mapping memory address that might "
107 "conflict with devices 0x%llx-0x%llx\n",
108 physical, physical+size-1);
109 /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
110 if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
111 result = physical - 0x400000000ull;
112 else
113 result = physical;
114 if (((result+size-1) & dma_mask) != result+size-1)
115 panic("dma_map_single: Attempt to map address "
116 "0x%llx-0x%llx, which can't be accessed "
117 "according to the dma mask 0x%llx\n",
118 physical, physical+size-1, dma_mask);
119 goto done;
120
121 case OCTEON_DMA_BAR_TYPE_BIG:
122#ifdef CONFIG_64BIT
123 /* If the device supports 64bit addressing, then use BAR2 */
124 if (dma_mask > BAR2_PCI_ADDRESS) {
125 result = physical + BAR2_PCI_ADDRESS;
126 goto done;
127 }
128#endif
129 if (unlikely(physical < (4ul << 10))) {
130 panic("dma_map_single: Not allowed to map first 4KB. "
131 "It interferes with BAR0 special area\n");
132 } else if (physical < (256ul << 20)) {
133 if (unlikely(physical + size > (256ul << 20)))
134 panic("dma_map_single: Requested memory spans "
135 "Bar0 0:256MB and bootbus\n");
136 result = physical;
137 goto done;
138 } else if (unlikely(physical < (512ul << 20))) {
139 panic("dma_map_single: Not allowed to map bootbus\n");
140 } else if (physical < (2ul << 30)) {
141 if (unlikely(physical + size > (2ul << 30)))
142 panic("dma_map_single: Requested memory spans "
143 "Bar0 512MB:2GB and BAR1\n");
144 result = physical;
145 goto done;
146 } else if (physical < (2ul << 30) + (128 << 20)) {
147 /* Fall through */
148 } else if (physical <
149 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
150 if (unlikely
151 (physical + size >
152 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
153 panic("dma_map_single: Requested memory "
154 "extends past Bar1 (4GB-%luMB)\n",
155 OCTEON_PCI_BAR1_HOLE_SIZE);
156 result = physical;
157 goto done;
158 } else if ((physical >= 0x410000000ull) &&
159 (physical < 0x420000000ull)) {
160 if (unlikely(physical + size > 0x420000000ull))
161 panic("dma_map_single: Requested memory spans "
162 "non existant memory\n");
163 /* BAR0 fixed mapping 256MB:512MB ->
164 * 16GB+256MB:16GB+512MB */
165 result = physical - 0x400000000ull;
166 goto done;
167 } else {
168 /* Continued below switch statement */
169 }
170 break;
171
172 case OCTEON_DMA_BAR_TYPE_SMALL:
173#ifdef CONFIG_64BIT
174 /* If the device supports 64bit addressing, then use BAR2 */
175 if (dma_mask > BAR2_PCI_ADDRESS) {
176 result = physical + BAR2_PCI_ADDRESS;
177 goto done;
178 }
179#endif
180 /* Continued below switch statement */
181 break;
182
183 default:
184 panic("dma_map_single: Invalid octeon_dma_bar_type\n");
185 }
186
187 /* Don't allow a mapping to span multiple Bar1 entries. The hardware guys
188 won't guarantee that DMA across Bar1 entries works */
189 if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
190 panic("dma_map_single: "
191 "Requested memory spans more than one Bar1 entry\n");
192
193 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
194 start_index = 31;
195 else if (unlikely(dma_mask < (1ul << 27)))
196 start_index = (dma_mask >> 22);
197 else
198 start_index = 31;
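	/*
	 * Illustrative example (not in the original patch): with the small
	 * BAR layout, a device whose DMA mask only covers 16 MB
	 * (dma_mask == 0x00ffffff) yields start_index = 0xffffff >> 22 = 3,
	 * so only BAR1 windows 0-3, which correspond to PCI addresses
	 * below 16 MB, are searched.
	 */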
199
200 /* Only one processor can access the Bar register at once */
201 spin_lock_irqsave(&bar1_lock, flags);
202
203 /* Look through Bar1 for existing mapping that will work */
204 for (index = start_index; index >= 0; index--) {
205 if ((bar1_state[index].address_bits == physical >> 22) &&
206 (bar1_state[index].ref_count)) {
207 /* An existing mapping will work, use it */
208 bar1_state[index].ref_count++;
209 if (unlikely(bar1_state[index].ref_count < 0))
210 panic("dma_map_single: "
211 "Bar1[%d] reference count overflowed\n",
212 (int) index);
213 result = (index << 22) | (physical & ((1 << 22) - 1));
214 /* Large BAR1 is offset at 2GB */
215 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
216 result += 2ul << 30;
217 goto done_unlock;
218 }
219 }
220
221 /* No existing mappings, look for a free entry */
222 for (index = start_index; index >= 0; index--) {
223 if (unlikely(bar1_state[index].ref_count == 0)) {
224 union cvmx_pci_bar1_indexx bar1_index;
225 /* We have a free entry, use it */
226 bar1_state[index].ref_count = 1;
227 bar1_state[index].address_bits = physical >> 22;
228 bar1_index.u32 = 0;
229 /* Address bits[35:22] sent to L2C */
230 bar1_index.s.addr_idx = physical >> 22;
231 /* Don't put PCI accesses in L2. */
232 bar1_index.s.ca = 1;
233 /* Endian Swap Mode */
234 bar1_index.s.end_swp = 1;
235 /* Set '1' when the selected address range is valid. */
236 bar1_index.s.addr_v = 1;
237 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
238 bar1_index.u32);
239 /* The new mapping will work, use it */
240 result = (index << 22) | (physical & ((1 << 22) - 1));
241 /* Large BAR1 is offset at 2GB */
242 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
243 result += 2ul << 30;
244 goto done_unlock;
245 }
246 }
247
248 pr_err("dma_map_single: "
249 "Can't find empty BAR1 index for physical mapping 0x%llx\n",
250 (unsigned long long) physical);
251
252done_unlock:
253 spin_unlock_irqrestore(&bar1_lock, flags);
254done:
255 pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
256 return result;
257#endif
258}
259
260void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
261{
- /* Without PCI/PCIe this function can be called for Octeon internal
-  * devices such as USB. These devices all support 64bit addressing */
262#ifndef CONFIG_PCI
263 /*
264 * Without PCI/PCIe this function can be called for Octeon internal
265 * devices such as USB. These devices all support 64bit addressing.
266 */
267 return;
268#else
269 unsigned long flags;
270 uint64_t index;
271
272 /*
273 * Platform devices, such as the internal USB, skip all
274 * translation and use Octeon physical addresses directly.
275 */
276 if (dev->bus == &platform_bus_type)
277 return;
278
279 switch (octeon_dma_bar_type) {
280 case OCTEON_DMA_BAR_TYPE_PCIE:
281 /* Nothing to do, all mappings are static */
282 goto done;
283
284 case OCTEON_DMA_BAR_TYPE_BIG:
285#ifdef CONFIG_64BIT
286 /* Nothing to do for addresses using BAR2 */
287 if (dma_addr >= BAR2_PCI_ADDRESS)
288 goto done;
289#endif
290 if (unlikely(dma_addr < (4ul << 10)))
291 panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
292 dma_addr);
293 else if (dma_addr < (2ul << 30))
294 /* Nothing to do for addresses using BAR0 */
295 goto done;
296 else if (dma_addr < (2ul << 30) + (128ul << 20))
297 /* Need to unmap, fall through */
298 index = (dma_addr - (2ul << 30)) >> 22;
299 else if (dma_addr <
300 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
301 goto done; /* Nothing to do for the rest of BAR1 */
302 else
303 panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
304 dma_addr);
305 /* Continued below switch statement */
306 break;
307
308 case OCTEON_DMA_BAR_TYPE_SMALL:
309#ifdef CONFIG_64BIT
310 /* Nothing to do for addresses using BAR2 */
311 if (dma_addr >= BAR2_PCI_ADDRESS)
312 goto done;
313#endif
314 index = dma_addr >> 22;
315 /* Continued below switch statement */
316 break;
317
318 default:
319 panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
320 }
321
322 if (unlikely(index > 31))
323 panic("dma_unmap_single: "
324 "Attempt to unmap an invalid address (0x%llx)\n",
325 dma_addr);
326
327 spin_lock_irqsave(&bar1_lock, flags);
328 bar1_state[index].ref_count--;
329 if (bar1_state[index].ref_count == 0)
330 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
331 else if (unlikely(bar1_state[index].ref_count < 0))
332 panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
333 (int) index);
334 spin_unlock_irqrestore(&bar1_lock, flags);
335done:
336 pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
337 return;
338#endif
339}
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
index 80d6cb26766b..2fd66db6939e 100644
--- a/arch/mips/cavium-octeon/executive/Makefile
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -11,3 +11,4 @@
11
12obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
13
14obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
new file mode 100644
index 000000000000..8fb82057cd80
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c
@@ -0,0 +1,70 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * Fixes and workaround for Octeon chip errata. This file
31 * contains functions called by cvmx-helper to workaround known
32 * chip errata. For the most part, code doesn't need to call
33 * these functions directly.
34 *
35 */
36#include <asm/octeon/octeon.h>
37
38#include <asm/octeon/cvmx-helper-jtag.h>
39
40/**
41 * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
42 * 1 doesn't work properly. The following code disables 2nd order
43 * CDR for the specified QLM.
44 *
45 * @qlm: QLM to disable 2nd order CDR for.
46 */
47void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
48{
49 int lane;
50 cvmx_helper_qlm_jtag_init();
51 /* We need to load all four lanes of the QLM, a total of 1072 bits */
52 for (lane = 0; lane < 4; lane++) {
53 /*
54 * Each lane has 268 bits. We need to set
55 * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> =
56 * 1. All other bits are zero. Bits go in LSB first,
57 * so start off with the zeros for bits <63:0>.
58 */
59 cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
60 /* cfg_cdr_incx<67:64>=3 */
61 cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
62 /* Zeros for bits <76:68> */
63 cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
64 /* cfg_cdr_secord<77>=1 */
65 cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
66 /* Zeros for bits <267:78> */
67 cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
68 }
69 cvmx_helper_qlm_jtag_update(qlm);
70}
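/*
 * Sanity check on the shifts above: 64 + 4 + 9 + 1 + 190 = 268 bits per
 * lane, and 4 lanes * 268 bits gives the full 1072-bit JTAG chain that
 * cvmx_helper_qlm_jtag_update() then programs into the QLM.
 */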
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
new file mode 100644
index 000000000000..c1c54890bae0
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
@@ -0,0 +1,144 @@
1
2/***********************license start***************
3 * Author: Cavium Networks
4 *
5 * Contact: support@caviumnetworks.com
6 * This file is part of the OCTEON SDK
7 *
8 * Copyright (c) 2003-2008 Cavium Networks
9 *
10 * This file is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, Version 2, as
12 * published by the Free Software Foundation.
13 *
14 * This file is distributed in the hope that it will be useful, but
15 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
17 * NONINFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this file; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 * or visit http://www.gnu.org/licenses/.
24 *
25 * This file may also be available under a different license from Cavium.
26 * Contact Cavium Networks for more information
27 ***********************license end**************************************/
28
29/**
30 *
31 * Helper utilities for qlm_jtag.
32 *
33 */
34
35#include <asm/octeon/octeon.h>
36#include <asm/octeon/cvmx-helper-jtag.h>
37
38
39/**
40 * Initialize the internal QLM JTAG logic to allow programming
41 * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
42 * These functions should only be used at the direction of Cavium
43 * Networks. Programming incorrect values into the JTAG chain
44 * can cause chip damage.
45 */
46void cvmx_helper_qlm_jtag_init(void)
47{
48 union cvmx_ciu_qlm_jtgc jtgc;
49 uint32_t clock_div = 0;
50 uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);
51 divisor = (divisor - 1) >> 2;
52 /* Convert the divisor into a power of 2 shift */
53 while (divisor) {
54 clock_div++;
55 divisor = divisor >> 1;
56 }
57
58 /*
59 * Clock divider for QLM JTAG operations. eclk is divided by
60 * 2^(CLK_DIV + 2)
61 */
62 jtgc.u64 = 0;
63 jtgc.s.clk_div = clock_div;
64 jtgc.s.mux_sel = 0;
65 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
66 jtgc.s.bypass = 0x3;
67 else
68 jtgc.s.bypass = 0xf;
69 cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
70 cvmx_read_csr(CVMX_CIU_QLM_JTGC);
71}
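/*
 * Worked example, assuming a 600 MHz core clock: divisor starts as
 * 600000000 / 25000000 = 24, then (24 - 1) >> 2 = 5, and the loop rounds
 * that up to clock_div = 3, so the JTAG clock runs at
 * eclk / 2^(3 + 2) = 600 MHz / 32 = 18.75 MHz, below the 25 MHz target.
 */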
72
73/**
74 * Write up to 32bits into the QLM jtag chain. Bits are shifted
75 * into the MSB and out the LSB, so you should shift in the low
76 * order bits followed by the high order bits. The JTAG chain is
77 * 4 * 268 bits long, or 1072.
78 *
79 * @qlm: QLM to shift value into
80 * @bits: Number of bits to shift in (1-32).
81 * @data: Data to shift in. Bit 0 enters the chain first, followed by
82 * bit 1, etc.
83 *
84 * Returns The low order bits of the JTAG chain that shifted out of the
85 * circle.
86 */
87uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
88{
89 union cvmx_ciu_qlm_jtgd jtgd;
90 jtgd.u64 = 0;
91 jtgd.s.shift = 1;
92 jtgd.s.shft_cnt = bits - 1;
93 jtgd.s.shft_reg = data;
94 if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
95 jtgd.s.select = 1 << qlm;
96 cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
97 do {
98 jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
99 } while (jtgd.s.shift);
100 return jtgd.s.shft_reg >> (32 - bits);
101}
102
103/**
104 * Shift long sequences of zeros into the QLM JTAG chain. It is
105 * common to need to shift more than 32 bits of zeros into the
106 * chain. This function is a convenience wrapper around
107 * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
108 * zeros at a time.
109 *
110 * @qlm: QLM to shift zeros into
111 * @bits:
112 */
113void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
114{
115 while (bits > 0) {
116 int n = bits;
117 if (n > 32)
118 n = 32;
119 cvmx_helper_qlm_jtag_shift(qlm, n, 0);
120 bits -= n;
121 }
122}
123
124/**
125 * Program the QLM JTAG chain into all lanes of the QLM. You must
126 * have already shifted in 268*4, or 1072 bits into the JTAG
127 * chain. Updating invalid values can possibly cause chip damage.
128 *
129 * @qlm: QLM to program
130 */
131void cvmx_helper_qlm_jtag_update(int qlm)
132{
133 union cvmx_ciu_qlm_jtgd jtgd;
134
135 /* Update the new data */
136 jtgd.u64 = 0;
137 jtgd.s.update = 1;
138 if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
139 jtgd.s.select = 1 << qlm;
140 cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
141 do {
142 jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
143 } while (jtgd.s.update);
144}
diff --git a/arch/mips/cavium-octeon/msi.c b/arch/mips/cavium-octeon/msi.c
new file mode 100644
index 000000000000..964b03b75a8f
--- /dev/null
+++ b/arch/mips/cavium-octeon/msi.c
@@ -0,0 +1,288 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2005-2007 Cavium Networks
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/msi.h>
11#include <linux/spinlock.h>
12#include <linux/interrupt.h>
13
14#include <asm/octeon/octeon.h>
15#include <asm/octeon/cvmx-npi-defs.h>
16#include <asm/octeon/cvmx-pci-defs.h>
17#include <asm/octeon/cvmx-npei-defs.h>
18#include <asm/octeon/cvmx-pexp-defs.h>
19
20#include "pci-common.h"
21
22/*
23 * Each bit in msi_free_irq_bitmask represents a MSI interrupt that is
24 * in use.
25 */
26static uint64_t msi_free_irq_bitmask;
27
28/*
29 * Each bit in msi_multiple_irq_bitmask tells that the device using
30 * this bit in msi_free_irq_bitmask is also using the next bit. This
31 * is used so we can disable all of the MSI interrupts when a device
32 * uses multiple.
33 */
34static uint64_t msi_multiple_irq_bitmask;
35
36/*
37 * This lock controls updates to msi_free_irq_bitmask and
38 * msi_multiple_irq_bitmask.
39 */
40static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock);
41
42
43/**
44 * Called when a driver requests MSI interrupts instead of the
45 * legacy INT A-D. This routine will allocate multiple interrupts
46 * for MSI devices that support them. A device can override this by
47 * programming the MSI control bits [6:4] before calling
48 * pci_enable_msi().
49 *
50 * @param dev Device requesting MSI interrupts
51 * @param desc MSI descriptor
52 *
53 * Returns 0 on success.
54 */
55int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
56{
57 struct msi_msg msg;
58 uint16_t control;
59 int configured_private_bits;
60 int request_private_bits;
61 int irq;
62 int irq_step;
63 uint64_t search_mask;
64
65 /*
66 * Read the MSI config to figure out how many IRQs this device
67 * wants. Most devices only want 1, which will give
68 * configured_private_bits and request_private_bits equal 0.
69 */
70 pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
71 &control);
72
73 /*
74 * If the number of private bits has been configured then use
75 * that value instead of the requested number. This gives the
76 * driver the chance to override the number of interrupts
77 * before calling pci_enable_msi().
78 */
79 configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4;
80 if (configured_private_bits == 0) {
81 /* Nothing is configured, so use the hardware requested size */
82 request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
83 } else {
84 /*
85 * Use the number of configured bits, assuming the
86 * driver wanted to override the hardware request
87 * value.
88 */
89 request_private_bits = configured_private_bits;
90 }
91
92 /*
93 * The PCI 2.3 spec mandates that there are at most 32
94 * interrupts. If this device asks for more, only give it one.
95 */
96 if (request_private_bits > 5)
97 request_private_bits = 0;
98
99try_only_one:
100 /*
101 * The IRQs have to be aligned on a power of two based on the
102 * number being requested.
103 */
104 irq_step = 1 << request_private_bits;
105
106 /* Mask with one bit for each IRQ */
107 search_mask = (1ull << irq_step) - 1;
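	/*
	 * Illustrative example: a device whose MSI capability advertises
	 * four vectors ends up with request_private_bits = 2, so
	 * irq_step = 4 and search_mask = 0xf; the loop below then probes
	 * bit positions 0, 4, 8, ... of msi_free_irq_bitmask looking for
	 * four consecutive free MSI numbers.
	 */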
108
109 /*
110 * We're going to search msi_free_irq_bitmask for zero
111 * bits. This represents an MSI interrupt number that isn't in
112 * use.
113 */
114 spin_lock(&msi_free_irq_bitmask_lock);
115 for (irq = 0; irq < 64; irq += irq_step) {
116 if ((msi_free_irq_bitmask & (search_mask << irq)) == 0) {
117 msi_free_irq_bitmask |= search_mask << irq;
118 msi_multiple_irq_bitmask |= (search_mask >> 1) << irq;
119 break;
120 }
121 }
122 spin_unlock(&msi_free_irq_bitmask_lock);
123
124 /* Make sure the search for available interrupts didn't fail */
125 if (irq >= 64) {
126 if (request_private_bits) {
127 pr_err("arch_setup_msi_irq: Unable to find %d free "
128 "interrupts, trying just one",
129 1 << request_private_bits);
130 request_private_bits = 0;
131 goto try_only_one;
132 } else
133 panic("arch_setup_msi_irq: Unable to find a free MSI "
134 "interrupt");
135 }
136
137 /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */
138 irq += OCTEON_IRQ_MSI_BIT0;
139
140 switch (octeon_dma_bar_type) {
141 case OCTEON_DMA_BAR_TYPE_SMALL:
142 /* When not using big bar, Bar 0 is based at 128MB */
143 msg.address_lo =
144 ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
145 msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
 break;
146 case OCTEON_DMA_BAR_TYPE_BIG:
147 /* When using big bar, Bar 0 is based at 0 */
148 msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
149 msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32;
150 break;
151 case OCTEON_DMA_BAR_TYPE_PCIE:
152 /* When using PCIe, Bar 0 is based at 0 */
153 /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */
154 msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff;
155 msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
156 break;
157 default:
158 panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type\n");
159 }
160 msg.data = irq - OCTEON_IRQ_MSI_BIT0;
161
162 /* Update the number of IRQs the device has available to it */
163 control &= ~PCI_MSI_FLAGS_QSIZE;
164 control |= request_private_bits << 4;
165 pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
166 control);
167
168 set_irq_msi(irq, desc);
169 write_msi_msg(irq, &msg);
170 return 0;
171}
172
173
174/**
175 * Called when a device no longer needs its MSI interrupts. All
176 * MSI interrupts for the device are freed.
177 *
178 * @irq: The device's first irq number. There may be multiple in sequence.
179 */
180void arch_teardown_msi_irq(unsigned int irq)
181{
182 int number_irqs;
183 uint64_t bitmask;
184
185 if ((irq < OCTEON_IRQ_MSI_BIT0) || (irq > OCTEON_IRQ_MSI_BIT63))
186 panic("arch_teardown_msi_irq: Attempted to teardown illegal "
187 "MSI interrupt (%d)", irq);
188 irq -= OCTEON_IRQ_MSI_BIT0;
189
190 /*
191 * Count the number of IRQs we need to free by looking at the
192 * msi_multiple_irq_bitmask. Each bit set means that the next
193 * IRQ is also owned by this device.
194 */
195 number_irqs = 0;
196 while ((irq+number_irqs < 64) &&
197 (msi_multiple_irq_bitmask & (1ull << (irq + number_irqs))))
198 number_irqs++;
199 number_irqs++;
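	/*
	 * Illustrative example: a device that was handed four vectors at
	 * base irq has bits irq, irq+1 and irq+2 set in
	 * msi_multiple_irq_bitmask, so the loop above counts 3 and the
	 * final increment brings number_irqs to 4, matching the allocation.
	 */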
200 /* Mask with one bit for each IRQ */
201 bitmask = (1ull << number_irqs) - 1;
202 /* Shift the mask to the correct bit location */
203 bitmask <<= irq;
204 if ((msi_free_irq_bitmask & bitmask) != bitmask)
205 panic("arch_teardown_msi_irq: Attempted to teardown MSI "
206 "interrupt (%d) not in use", irq);
207
208 /* Checks are done, update the in use bitmask */
209 spin_lock(&msi_free_irq_bitmask_lock);
210 msi_free_irq_bitmask &= ~bitmask;
211 msi_multiple_irq_bitmask &= ~bitmask;
212 spin_unlock(&msi_free_irq_bitmask_lock);
213}
214
215
216/**
217 * Called by the interrupt handling code when an MSI interrupt
218 * occurs.
219 *
220 * @param cpl
221 * @param dev_id
222 *
223 * @return
224 */
225static irqreturn_t octeon_msi_interrupt(int cpl, void *dev_id)
226{
227 uint64_t msi_bits;
228 int irq;
229
230 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE)
231 msi_bits = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_RCV0);
232 else
233 msi_bits = cvmx_read_csr(CVMX_NPI_NPI_MSI_RCV);
234 irq = fls64(msi_bits);
235 if (irq) {
236 irq += OCTEON_IRQ_MSI_BIT0 - 1;
237 if (irq_desc[irq].action) {
238 do_IRQ(irq);
239 return IRQ_HANDLED;
240 } else {
241 pr_err("Spurious MSI interrupt %d\n", irq);
242 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
243 /* These chips have PCIe */
244 cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
245 1ull << (irq -
246 OCTEON_IRQ_MSI_BIT0));
247 } else {
248 /* These chips have PCI */
249 cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
250 1ull << (irq -
251 OCTEON_IRQ_MSI_BIT0));
252 }
253 }
254 }
255 return IRQ_NONE;
256}
257
258
259/**
260 * Initializes the MSI interrupt handling code
261 *
262 * @return
263 */
264int octeon_msi_initialize(void)
265{
266 int r;
267 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
268 r = request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt,
269 IRQF_SHARED,
270 "MSI[0:63]", octeon_msi_interrupt);
271 } else if (octeon_is_pci_host()) {
272 r = request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt,
273 IRQF_SHARED,
274 "MSI[0:15]", octeon_msi_interrupt);
275 r += request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt,
276 IRQF_SHARED,
277 "MSI[16:31]", octeon_msi_interrupt);
278 r += request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt,
279 IRQF_SHARED,
280 "MSI[32:47]", octeon_msi_interrupt);
281 r += request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt,
282 IRQF_SHARED,
283 "MSI[48:63]", octeon_msi_interrupt);
284 }
285 return 0;
286}
287
288subsys_initcall(octeon_msi_initialize);
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index d3a0c8154bec..8dfa009e0070 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -10,6 +10,8 @@
10#include <linux/hardirq.h>
11
12#include <asm/octeon/octeon.h>
13#include <asm/octeon/cvmx-pexp-defs.h>
14#include <asm/octeon/cvmx-npi-defs.h>
15
16DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
17DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
diff --git a/arch/mips/cavium-octeon/pci-common.c b/arch/mips/cavium-octeon/pci-common.c
new file mode 100644
index 000000000000..cd029f88da7f
--- /dev/null
+++ b/arch/mips/cavium-octeon/pci-common.c
@@ -0,0 +1,137 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2005-2007 Cavium Networks
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/pci.h>
11#include <linux/interrupt.h>
12#include <linux/time.h>
13#include <linux/delay.h>
14#include "pci-common.h"
15
16typeof(pcibios_map_irq) *octeon_pcibios_map_irq;
17enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
18
19/**
20 * Map a PCI device to the appropriate interrupt line
21 *
22 * @param dev The Linux PCI device structure for the device to map
23 * @param slot The slot number for this device on __BUS 0__. Linux
24 * enumerates through all the bridges and figures out the
25 * slot on Bus 0 where this device eventually hooks to.
26 * @param pin The PCI interrupt pin read from the device, then swizzled
27 * as it goes through each bridge.
28 * @return Interrupt number for the device
29 */
30int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
31{
32 if (octeon_pcibios_map_irq)
33 return octeon_pcibios_map_irq(dev, slot, pin);
34 else
35 panic("octeon_pcibios_map_irq doesn't point to a "
36 "pcibios_map_irq() function");
37}
38
39
40/**
41 * Called to perform platform specific PCI setup
42 *
43 * @param dev
44 * @return
45 */
46int pcibios_plat_dev_init(struct pci_dev *dev)
47{
48 uint16_t config;
49 uint32_t dconfig;
50 int pos;
51 /*
52 * Force the Cache line setting to 64 bytes. The standard
53 * Linux bus scan doesn't seem to set it. Octeon really has
54 * 128 byte lines, but Intel bridges get really upset if you
55 * try and set values above 64 bytes. Value is specified in
56 * 32bit words.
57 */
58 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
59 /* Set latency timers for all devices */
60 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48);
61
62 /* Enable reporting System errors and parity errors on all devices */
63 /* Enable parity checking and error reporting */
64 pci_read_config_word(dev, PCI_COMMAND, &config);
65 config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
66 pci_write_config_word(dev, PCI_COMMAND, config);
67
68 if (dev->subordinate) {
69 /* Set latency timers on sub bridges */
70 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48);
71 /* More bridge error detection */
72 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
73 config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
74 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
75 }
76
77 /* Enable the PCIe normal error reporting */
78 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
79 if (pos) {
80 /* Update Device Control */
81 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
82 /* Correctable Error Reporting */
83 config |= PCI_EXP_DEVCTL_CERE;
84 /* Non-Fatal Error Reporting */
85 config |= PCI_EXP_DEVCTL_NFERE;
86 /* Fatal Error Reporting */
87 config |= PCI_EXP_DEVCTL_FERE;
88 /* Unsupported Request */
89 config |= PCI_EXP_DEVCTL_URRE;
90 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
91 }
92
93 /* Find the Advanced Error Reporting capability */
94 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
95 if (pos) {
96 /* Clear Uncorrectable Error Status */
97 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
98 &dconfig);
99 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
100 dconfig);
101 /* Enable reporting of all uncorrectable errors */
102 /* Uncorrectable Error Mask - turned on bits disable errors */
103 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
104 /*
105 * Leave severity at HW default. This only controls if
106 * errors are reported as uncorrectable or
107 * correctable, not if the error is reported.
108 */
109 /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
110 /* Clear Correctable Error Status */
111 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
112 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
113 /* Enable reporting of all correctable errors */
114 /* Correctable Error Mask - turned on bits disable errors */
115 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
116 /* Advanced Error Capabilities */
117 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
118 /* ECRC Generation Enable */
119 if (dconfig & PCI_ERR_CAP_ECRC_GENC)
120 dconfig |= PCI_ERR_CAP_ECRC_GENE;
121 /* ECRC Check Enable */
122 if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
123 dconfig |= PCI_ERR_CAP_ECRC_CHKE;
124 pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
125 /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
126 /* Report all errors to the root complex */
127 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
128 PCI_ERR_ROOT_CMD_COR_EN |
129 PCI_ERR_ROOT_CMD_NONFATAL_EN |
130 PCI_ERR_ROOT_CMD_FATAL_EN);
131 /* Clear the Root status register */
132 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
133 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
134 }
135
136 return 0;
137}
diff --git a/arch/mips/cavium-octeon/pci-common.h b/arch/mips/cavium-octeon/pci-common.h
new file mode 100644
index 000000000000..74ae79991e45
--- /dev/null
+++ b/arch/mips/cavium-octeon/pci-common.h
@@ -0,0 +1,39 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2005-2007 Cavium Networks
7 */
8#ifndef __OCTEON_PCI_COMMON_H__
9#define __OCTEON_PCI_COMMON_H__
10
11#include <linux/pci.h>
12
13/* Some PCI cards require delays when accessing config space. */
14#define PCI_CONFIG_SPACE_DELAY 10000
15
16/* pcibios_map_irq() is defined inside pci-common.c. All it does is call the
17 Octeon specific version pointed to by this variable. This function needs to
18 change for PCI or PCIe based hosts */
19extern typeof(pcibios_map_irq) *octeon_pcibios_map_irq;
20
21/* The following defines are only used when octeon_dma_bar_type =
22 OCTEON_DMA_BAR_TYPE_BIG */
23#define OCTEON_PCI_BAR1_HOLE_BITS 5
24#define OCTEON_PCI_BAR1_HOLE_SIZE (1ul<<(OCTEON_PCI_BAR1_HOLE_BITS+3))
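/*
 * With OCTEON_PCI_BAR1_HOLE_BITS = 5 this evaluates to 1 << 8 = 256;
 * callers scale it to megabytes (OCTEON_PCI_BAR1_HOLE_SIZE << 20), so the
 * hole reserved at the top of the 2GB-4GB BAR1 region is 256 MB.
 */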
25
26enum octeon_dma_bar_type {
27 OCTEON_DMA_BAR_TYPE_INVALID,
28 OCTEON_DMA_BAR_TYPE_SMALL,
29 OCTEON_DMA_BAR_TYPE_BIG,
30 OCTEON_DMA_BAR_TYPE_PCIE
31};
32
33/**
34 * This is a variable to tell the DMA mapping system in dma-octeon.c
35 * how to map PCI DMA addresses.
36 */
37extern enum octeon_dma_bar_type octeon_dma_bar_type;
38
39#endif
diff --git a/arch/mips/cavium-octeon/pci.c b/arch/mips/cavium-octeon/pci.c
new file mode 100644
index 000000000000..67c0ff5e92f1
--- /dev/null
+++ b/arch/mips/cavium-octeon/pci.c
@@ -0,0 +1,568 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2005-2007 Cavium Networks
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/pci.h>
11#include <linux/interrupt.h>
12#include <linux/time.h>
13#include <linux/delay.h>
14
15#include <asm/time.h>
16
17#include <asm/octeon/octeon.h>
18#include <asm/octeon/cvmx-npi-defs.h>
19#include <asm/octeon/cvmx-pci-defs.h>
20
21#include "pci-common.h"
22
23#define USE_OCTEON_INTERNAL_ARBITER
24
25/*
26 * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
27 * addresses. Use PCI endian swapping 1 so no address swapping is
28 * necessary. The Linux io routines will endian swap the data.
29 */
30#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
31#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
32
33/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
34#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
35
36/**
37 * This is the bit decoding used for the Octeon PCI controller addresses
38 */
39union octeon_pci_address {
40 uint64_t u64;
41 struct {
42 uint64_t upper:2;
43 uint64_t reserved:13;
44 uint64_t io:1;
45 uint64_t did:5;
46 uint64_t subdid:3;
47 uint64_t reserved2:4;
48 uint64_t endian_swap:2;
49 uint64_t reserved3:10;
50 uint64_t bus:8;
51 uint64_t dev:5;
52 uint64_t func:3;
53 uint64_t reg:8;
54 } s;
55};
56
57/**
58 * Return the mapping of PCI device number to IRQ line. Each
59 * character in the return string represents the interrupt
60 * line for the device at that position. Device 1 maps to the
61 * first character, etc. The characters A-D are used for PCI
62 * interrupts.
63 *
64 * Returns PCI interrupt mapping
65 */
66const char *octeon_get_pci_interrupts(void)
67{
68 /*
69 * Returning an empty string causes the interrupts to be
70 * routed based on the PCI specification. From the PCI spec:
71 *
72 * INTA# of Device Number 0 is connected to IRQW on the system
73 * board. (Device Number has no significance regarding being
74 * located on the system board or in a connector.) INTA# of
75 * Device Number 1 is connected to IRQX on the system
76 * board. INTA# of Device Number 2 is connected to IRQY on the
77 * system board. INTA# of Device Number 3 is connected to IRQZ
78 * on the system board. The table below describes how each
79 * agent's INTx# lines are connected to the system board
80 * interrupt lines. The following equation can be used to
81 * determine to which INTx# signal on the system board a given
82 * device's INTx# line(s) is connected.
83 *
84 * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0,
85 * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I =
86 * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
87 * INTD# = 3)
88 */
89 switch (octeon_bootinfo->board_type) {
90 case CVMX_BOARD_TYPE_NAO38:
91 /* This is really the NAC38 */
92 return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA";
93 case CVMX_BOARD_TYPE_THUNDER:
94 return "";
95 case CVMX_BOARD_TYPE_EBH3000:
96 return "";
97 case CVMX_BOARD_TYPE_EBH3100:
98 case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
99 case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
100 return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
101 case CVMX_BOARD_TYPE_BBGW_REF:
102 return "AABCD";
103 default:
104 return "";
105 }
106}
107
108/**
109 * Map a PCI device to the appropriate interrupt line
110 *
111 * @dev: The Linux PCI device structure for the device to map
112 * @slot: The slot number for this device on __BUS 0__. Linux
113 * enumerates through all the bridges and figures out the
114 * slot on Bus 0 where this device eventually hooks to.
115 * @pin: The PCI interrupt pin read from the device, then swizzled
116 * as it goes through each bridge.
117 * Returns Interrupt number for the device
118 */
119int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
120 u8 slot, u8 pin)
121{
122 int irq_num;
123 const char *interrupts;
124 int dev_num;
125
126 /* Get the board specific interrupt mapping */
127 interrupts = octeon_get_pci_interrupts();
128
129 dev_num = dev->devfn >> 3;
130 if (dev_num < strlen(interrupts))
131 irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) +
132 OCTEON_IRQ_PCI_INT0;
133 else
134 irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0;
135 return irq_num;
136}
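/*
 * Worked example (illustrative): on a board with no entry in the
 * interrupt table, a device in slot 3 asserting INTB (pin == 2) gets
 * irq_num = ((3 + 2 - 3) & 3) + OCTEON_IRQ_PCI_INT0, i.e. the third
 * PCI interrupt line.
 */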
137
138
139/**
140 * Read a value from configuration space
141 *
142 */
143static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
144 int reg, int size, u32 *val)
145{
146 union octeon_pci_address pci_addr;
147
148 pci_addr.u64 = 0;
149 pci_addr.s.upper = 2;
150 pci_addr.s.io = 1;
151 pci_addr.s.did = 3;
152 pci_addr.s.subdid = 1;
153 pci_addr.s.endian_swap = 1;
154 pci_addr.s.bus = bus->number;
155 pci_addr.s.dev = devfn >> 3;
156 pci_addr.s.func = devfn & 0x7;
157 pci_addr.s.reg = reg;
158
159#if PCI_CONFIG_SPACE_DELAY
160 udelay(PCI_CONFIG_SPACE_DELAY);
161#endif
162 switch (size) {
163 case 4:
164 *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
165 return PCIBIOS_SUCCESSFUL;
166 case 2:
167 *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64));
168 return PCIBIOS_SUCCESSFUL;
169 case 1:
170 *val = cvmx_read64_uint8(pci_addr.u64);
171 return PCIBIOS_SUCCESSFUL;
172 }
173 return PCIBIOS_FUNC_NOT_SUPPORTED;
174}
175
176
177/**
178 * Write a value to PCI configuration space
179 *
180 * @bus:
181 * @devfn:
182 * @reg:
183 * @size:
184 * @val:
185 * Returns
186 */
187static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
188 int reg, int size, u32 val)
189{
190 union octeon_pci_address pci_addr;
191
192 pci_addr.u64 = 0;
193 pci_addr.s.upper = 2;
194 pci_addr.s.io = 1;
195 pci_addr.s.did = 3;
196 pci_addr.s.subdid = 1;
197 pci_addr.s.endian_swap = 1;
198 pci_addr.s.bus = bus->number;
199 pci_addr.s.dev = devfn >> 3;
200 pci_addr.s.func = devfn & 0x7;
201 pci_addr.s.reg = reg;
202
203#if PCI_CONFIG_SPACE_DELAY
204 udelay(PCI_CONFIG_SPACE_DELAY);
205#endif
206 switch (size) {
207 case 4:
208 cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
209 return PCIBIOS_SUCCESSFUL;
210 case 2:
211 cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val));
212 return PCIBIOS_SUCCESSFUL;
213 case 1:
214 cvmx_write64_uint8(pci_addr.u64, val);
215 return PCIBIOS_SUCCESSFUL;
216 }
217 return PCIBIOS_FUNC_NOT_SUPPORTED;
218}
219
220
221static struct pci_ops octeon_pci_ops = {
222 octeon_read_config,
223 octeon_write_config,
224};
225
226static struct resource octeon_pci_mem_resource = {
227 .start = 0,
228 .end = 0,
229 .name = "Octeon PCI MEM",
230 .flags = IORESOURCE_MEM,
231};
232
233/*
234 * PCI ports must be above 16KB so they are not claimed by the ISA bus
235 * filtering in the PCI-X to PCI bridge
236 */
237static struct resource octeon_pci_io_resource = {
238 .start = 0x4000,
239 .end = OCTEON_PCI_IOSPACE_SIZE - 1,
240 .name = "Octeon PCI IO",
241 .flags = IORESOURCE_IO,
242};
243
244static struct pci_controller octeon_pci_controller = {
245 .pci_ops = &octeon_pci_ops,
246 .mem_resource = &octeon_pci_mem_resource,
247 .mem_offset = OCTEON_PCI_MEMSPACE_OFFSET,
248 .io_resource = &octeon_pci_io_resource,
249 .io_offset = 0,
250 .io_map_base = OCTEON_PCI_IOSPACE_BASE,
251};
252
253
254/**
255 * Low level initialize the Octeon PCI controller
256 *
257 * Returns
258 */
259static void octeon_pci_initialize(void)
260{
261 union cvmx_pci_cfg01 cfg01;
262 union cvmx_npi_ctl_status ctl_status;
263 union cvmx_pci_ctl_status_2 ctl_status_2;
264 union cvmx_pci_cfg19 cfg19;
265 union cvmx_pci_cfg16 cfg16;
266 union cvmx_pci_cfg22 cfg22;
267 union cvmx_pci_cfg56 cfg56;
268
269 /* Reset the PCI Bus */
270 cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1);
271 cvmx_read_csr(CVMX_CIU_SOFT_PRST);
272
273 udelay(2000); /* Hold PCI reset for 2 ms */
274
275 ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */
276 ctl_status.s.max_word = 1;
277 ctl_status.s.timer = 1;
278 cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
279
280 /* Deassert PCI reset and advertise PCX Host Mode Device Capability
281 (64b) */
282 cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
283 cvmx_read_csr(CVMX_CIU_SOFT_PRST);
284
285 udelay(2000); /* Wait 2 ms after deasserting PCI reset */
286
287 ctl_status_2.u32 = 0;
288 ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set
289 before any PCI reads. */
290 ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */
291 ctl_status_2.s.bar2_enb = 1;
292 ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */
293 ctl_status_2.s.bar2_esx = 1;
294 ctl_status_2.s.pmo_amod = 1; /* Round robin priority */
295 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
296 /* BAR1 hole */
297 ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS;
298 ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */
299 ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */
300 ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */
301 ctl_status_2.s.bb1 = 1; /* BAR1 is big */
302 ctl_status_2.s.bb0 = 1; /* BAR0 is big */
303 }
304
305 octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
306 udelay(2000); /* Wait 2 ms before doing PCI reads */
307
308 ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2);
309 pr_notice("PCI Status: %s %s-bit\n",
310 ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI",
311 ctl_status_2.s.ap_64ad ? "64" : "32");
312
313 if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
314 union cvmx_pci_cnt_reg cnt_reg_start;
315 union cvmx_pci_cnt_reg cnt_reg_end;
316 unsigned long cycles, pci_clock;
317
318 cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
319 cycles = read_c0_cvmcount();
320 udelay(1000);
321 cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG);
322 cycles = read_c0_cvmcount() - cycles;
323 pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) /
324 (cycles / (mips_hpt_frequency / 1000000));
325 pr_notice("PCI Clock: %lu MHz\n", pci_clock);
326 }
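	/*
	 * Illustrative numbers: if the PCI counter advances by 33000 ticks
	 * while the core counter advances by 500000 cycles at
	 * mips_hpt_frequency = 500 MHz, the elapsed time is 1000 us and the
	 * reported PCI clock is 33000 / 1000 = 33 MHz.
	 */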
327
328 /*
329 * TDOMC must be set to one in PCI mode. TDOMC should be set to 4
330 * in PCI-X mode to allow four outstanding splits. Otherwise,
331 * should not change from its reset value. Don't write PCI_CFG19
332 * in PCI mode (0x82000001 reset value), write it to 0x82000004
333 * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero.
334 * MRBCM -> must be one.
335 */
336 if (ctl_status_2.s.ap_pcix) {
337 cfg19.u32 = 0;
338 /*
339 * Target Delayed/Split request outstanding maximum
340 * count. [1..31] and 0=32. NOTE: If the user
341 * programs these bits beyond the Designed Maximum
342 * outstanding count, then the designed maximum table
343 * depth will be used instead. No additional
344 * Deferred/Split transactions will be accepted if
345 * this outstanding maximum count is
346 * reached. Furthermore, no additional deferred/split
347 * transactions will be accepted if the I/O delay/ I/O
348 * Split Request outstanding maximum is reached.
349 */
350 cfg19.s.tdomc = 4;
351 /*
352 * Master Deferred Read Request Outstanding Max Count
353 * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
354 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
355 * 5 2 110 6 3 111 7 3 For example, if these bits are
356 * programmed to 100, the core can support 2 DAC
357 * cycles, 4 SAC cycles or a combination of 1 DAC and
358 * 2 SAC cycles. NOTE: For the PCI-X maximum
359 * outstanding split transactions, refer to
360 * CRE0[22:20].
361 */
362 cfg19.s.mdrrmc = 2;
363 /*
364 * Master Request (Memory Read) Byte Count/Byte Enable
365 * select. 0 = Byte Enables valid. In PCI mode, a
366 * burst transaction cannot be performed using Memory
367 * Read command=4?h6. 1 = DWORD Byte Count valid
368 * (default). In PCI Mode, the memory read byte
369 * enables are automatically generated by the
370 * core. Note: N3 Master Request transaction sizes are
371 * always determined through the
372 * am_attr[<35:32>|<7:0>] field.
373 */
374 cfg19.s.mrbcm = 1;
375 octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32);
376 }
377
378
379 cfg01.u32 = 0;
380 cfg01.s.msae = 1; /* Memory Space Access Enable */
381 cfg01.s.me = 1; /* Master Enable */
382 cfg01.s.pee = 1; /* PERR# Enable */
383 cfg01.s.see = 1; /* System Error Enable */
384 cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */
385
386 octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
387
388#ifdef USE_OCTEON_INTERNAL_ARBITER
389 /*
390 * When OCTEON is a PCI host, most systems will use OCTEON's
391 * internal arbiter, so must enable it before any PCI/PCI-X
392 * traffic can occur.
393 */
394 {
395 union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg;
396
397 pci_int_arb_cfg.u64 = 0;
398 pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */
399 cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64);
400 }
401#endif /* USE_OCTEON_INTERNAL_ARBITER */
402
403 /*
404 * Preferably written to 1 to set MLTD. [RDSATI,TRTAE,
405 * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to
406 * 1..7.
407 */
408 cfg16.u32 = 0;
409 cfg16.s.mltd = 1; /* Master Latency Timer Disable */
410 octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32);
411
412 /*
413 * Should be written to 0x4ff00. MTTV -> must be zero.
414 * FLUSH -> must be 1. MRV -> should be 0xFF.
415 */
416 cfg22.u32 = 0;
417 /* Master Retry Value [1..255] and 0=infinite */
418 cfg22.s.mrv = 0xff;
419 /*
420 * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper
421 * N3K operation.
422 */
423 cfg22.s.flush = 1;
424 octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32);
425
426 /*
427 * MOST Indicates the maximum number of outstanding splits (in -1
428 * notation) when OCTEON is in PCI-X mode. PCI-X performance is
429 * affected by the MOST selection. Should generally be written
430 * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807,
431 * depending on the desired MOST of 3, 2, 1, or 0, respectively.
432 */
433 cfg56.u32 = 0;
434 cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */
435 cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */
436 cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */
437 cfg56.s.roe = 1; /* Relaxed Ordering Enable */
438 cfg56.s.mmbc = 1; /* Maximum Memory Byte Count
439 [0=512B,1=1024B,2=2048B,3=4096B] */
440 cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1
441 .. 7=32] */
442
443 octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32);
444
445 /*
446 * Affects PCI performance when OCTEON services reads to its
447 * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
448 * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
449 * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
450 * these values need to be changed so they won't possibly prefetch off
451 * of the end of memory if PCI is DMAing a buffer at the end of
452 * memory. Note that these values differ from their reset values.
453 */
454 octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21);
455 octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31);
456 octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31);
457}
458
459
460/**
461 * Initialize the Octeon PCI controller
462 *
463 * Returns
464 */
465static int __init octeon_pci_setup(void)
466{
467 union cvmx_npi_mem_access_subidx mem_access;
468 int index;
469
470 /* Only these chips have PCI */
471 if (octeon_has_feature(OCTEON_FEATURE_PCIE))
472 return 0;
473
474 /* Point pcibios_map_irq() to the PCI version of it */
475 octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
476
477 /* Only use the big bars on chips that support it */
478 if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
479 OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
480 OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
481 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL;
482 else
483 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
484
485 /* PCI I/O and PCI MEM values */
486 set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
487 ioport_resource.start = 0;
488 ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
489 if (!octeon_is_pci_host()) {
490 pr_notice("Not in host mode, PCI Controller not initialized\n");
491 return 0;
492 }
493
494 pr_notice("%s Octeon big bar support\n",
495 (octeon_dma_bar_type ==
496 OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
497
498 octeon_pci_initialize();
499
500 mem_access.u64 = 0;
501 mem_access.s.esr = 1; /* Endian-Swap on read. */
502 mem_access.s.esw = 1; /* Endian-Swap on write. */
503 mem_access.s.nsr = 0; /* No-Snoop on read. */
504 mem_access.s.nsw = 0; /* No-Snoop on write. */
505 mem_access.s.ror = 0; /* Relax Read on read. */
506 mem_access.s.row = 0; /* Relax Order on write. */
507 mem_access.s.ba = 0; /* PCI Address bits [63:36]. */
508 cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64);
509
510 /*
511 * Remap the Octeon BAR 2 above all 32 bit devices
512 * (0x8000000000ul). This is done here so it is remapped
513 * before the readl()'s below. We don't want BAR2 overlapping
514 * with BAR0/BAR1 during these reads.
515 */
516 octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0);
517 octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80);
518
519 /* Disable the BAR1 movable mappings */
520 for (index = 0; index < 32; index++)
521 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
522
523 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
524 /* Remap the Octeon BAR 0 to 0-2GB */
525 octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0);
526 octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
527
528 /*
529 * Remap the Octeon BAR 1 to map 2GB-4GB (minus the
530 * BAR 1 hole).
531 */
532 octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
533 octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
534
535 /* Devices go after BAR1 */
536 octeon_pci_mem_resource.start =
537 OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
538 (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
539 octeon_pci_mem_resource.end =
540 octeon_pci_mem_resource.start + (1ul << 30);
541 } else {
542 /* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */
543 octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20);
544 octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0);
545
546 /* Remap the Octeon BAR 1 to map 0-128MB */
547 octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
548 octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
549
550 /* Devices go after BAR0 */
551 octeon_pci_mem_resource.start =
552 OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
553 (4ul << 10);
554 octeon_pci_mem_resource.end =
555 octeon_pci_mem_resource.start + (1ul << 30);
556 }
557
558 register_pci_controller(&octeon_pci_controller);
559
560 /*
561 * Clear any errors that might be pending from before the bus
562 * was setup properly.
563 */
564 cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
565 return 0;
566}
567
568arch_initcall(octeon_pci_setup);
diff --git a/arch/mips/cavium-octeon/pcie.c b/arch/mips/cavium-octeon/pcie.c
new file mode 100644
index 000000000000..49d14081b3b5
--- /dev/null
+++ b/arch/mips/cavium-octeon/pcie.c
@@ -0,0 +1,1370 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2007, 2008 Cavium Networks
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/pci.h>
11#include <linux/interrupt.h>
12#include <linux/time.h>
13#include <linux/delay.h>
14
15#include <asm/octeon/octeon.h>
16#include <asm/octeon/cvmx-npei-defs.h>
17#include <asm/octeon/cvmx-pciercx-defs.h>
18#include <asm/octeon/cvmx-pescx-defs.h>
19#include <asm/octeon/cvmx-pexp-defs.h>
20#include <asm/octeon/cvmx-helper-errata.h>
21
22#include "pci-common.h"
23
24union cvmx_pcie_address {
25 uint64_t u64;
26 struct {
27 uint64_t upper:2; /* Normally 2 for XKPHYS */
28 uint64_t reserved_49_61:13; /* Must be zero */
29 uint64_t io:1; /* 1 for IO space access */
30 uint64_t did:5; /* PCIe DID = 3 */
31 uint64_t subdid:3; /* PCIe SubDID = 1 */
32 uint64_t reserved_36_39:4; /* Must be zero */
33 uint64_t es:2; /* Endian swap = 1 */
34 uint64_t port:2; /* PCIe port 0,1 */
35 uint64_t reserved_29_31:3; /* Must be zero */
36 /*
37 * Selects the type of the configuration request (0 = type 0,
38 * 1 = type 1).
39 */
40 uint64_t ty:1;
41 /* Target bus number sent in the ID in the request. */
42 uint64_t bus:8;
43 /*
44 * Target device number sent in the ID in the
45 * request. Note that Dev must be zero for type 0
46 * configuration requests.
47 */
48 uint64_t dev:5;
49 /* Target function number sent in the ID in the request. */
50 uint64_t func:3;
51 /*
52 * Selects a register in the configuration space of
53 * the target.
54 */
55 uint64_t reg:12;
56 } config;
57 struct {
58 uint64_t upper:2; /* Normally 2 for XKPHYS */
59 uint64_t reserved_49_61:13; /* Must be zero */
60 uint64_t io:1; /* 1 for IO space access */
61 uint64_t did:5; /* PCIe DID = 3 */
62 uint64_t subdid:3; /* PCIe SubDID = 2 */
63 uint64_t reserved_36_39:4; /* Must be zero */
64 uint64_t es:2; /* Endian swap = 1 */
65 uint64_t port:2; /* PCIe port 0,1 */
66 uint64_t address:32; /* PCIe IO address */
67 } io;
68 struct {
69 uint64_t upper:2; /* Normally 2 for XKPHYS */
70 uint64_t reserved_49_61:13; /* Must be zero */
71 uint64_t io:1; /* 1 for IO space access */
72 uint64_t did:5; /* PCIe DID = 3 */
73 uint64_t subdid:3; /* PCIe SubDID = 3-6 */
74 uint64_t reserved_36_39:4; /* Must be zero */
75 uint64_t address:36; /* PCIe Mem address */
76 } mem;
77};
78
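[Editorial worked example, not part of the patch: how the bitfields above compose into a 64-bit XKPHYS address, assuming the big-endian member ordering used on Octeon and a primary bus number of 1 as set later in __cvmx_pcie_rc_initialize_config_space(). For port 0, bus 1, device 0, function 0, register 0:

	union cvmx_pcie_address a = { .u64 = 0 };
	a.config.upper = 2;	/* XKPHYS */
	a.config.io = 1;	a.config.did = 3;
	a.config.subdid = 1;	a.config.es = 1;
	a.config.port = 0;	a.config.ty = 0;	/* type 0 request */
	a.config.bus = 1;	/* dev, func, reg stay 0 */
	/* a.u64 == 0x8001190400100000ull with this layout */

This is the value __cvmx_pcie_build_config_addr() below would return for the same arguments.]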
79/**
80 * Return the Core virtual base address for PCIe IO access. IOs are
81 * read/written as an offset from this address.
82 *
83 * @pcie_port: PCIe port the IO is for
84 *
85 * Returns 64bit Octeon IO base address for read/write
86 */
87static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
88{
89 union cvmx_pcie_address pcie_addr;
90 pcie_addr.u64 = 0;
91 pcie_addr.io.upper = 0;
92 pcie_addr.io.io = 1;
93 pcie_addr.io.did = 3;
94 pcie_addr.io.subdid = 2;
95 pcie_addr.io.es = 1;
96 pcie_addr.io.port = pcie_port;
97 return pcie_addr.u64;
98}
99
100/**
101 * Size of the IO address region returned at address
102 * cvmx_pcie_get_io_base_address()
103 *
104 * @pcie_port: PCIe port the IO is for
105 *
106 * Returns Size of the IO window
107 */
108static inline uint64_t cvmx_pcie_get_io_size(int pcie_port)
109{
110 return 1ull << 32;
111}
112
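[Minimal usage sketch, editorial only; the offset 0x3f8 is an arbitrary example port. I/O accesses are plain loads/stores at an offset from this window, after converting it to an XKPHYS virtual address the same way octeon_pcie_setup() does later in this file:

	uint64_t io_base = CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0));
	uint8_t v = cvmx_read64_uint8(io_base + 0x3f8);	/* example port */
]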
113/**
114 * Return the Core virtual base address for PCIe MEM access. Memory is
115 * read/written as an offset from this address.
116 *
117 * @pcie_port: PCIe port the IO is for
118 *
119 * Returns 64bit Octeon IO base address for read/write
120 */
121static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
122{
123 union cvmx_pcie_address pcie_addr;
124 pcie_addr.u64 = 0;
125 pcie_addr.mem.upper = 0;
126 pcie_addr.mem.io = 1;
127 pcie_addr.mem.did = 3;
128 pcie_addr.mem.subdid = 3 + pcie_port;
129 return pcie_addr.u64;
130}
131
132/**
133 * Size of the Mem address region returned at address
134 * cvmx_pcie_get_mem_base_address()
135 *
136 * @pcie_port: PCIe port the IO is for
137 *
138 * Returns Size of the Mem window
139 */
140static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
141{
142 return 1ull << 36;
143}
144
145/**
146 * Read a PCIe config space register indirectly. This is used for
147 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
148 *
149 * @pcie_port: PCIe port to read from
150 * @cfg_offset: Address to read
151 *
152 * Returns Value read
153 */
154static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
155{
156 union cvmx_pescx_cfg_rd pescx_cfg_rd;
157 pescx_cfg_rd.u64 = 0;
158 pescx_cfg_rd.s.addr = cfg_offset;
159 cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
160 pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
161 return pescx_cfg_rd.s.data;
162}
163
164/**
165 * Write a PCIe config space register indirectly. This is used for
166 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
167 *
168 * @pcie_port: PCIe port to write to
169 * @cfg_offset: Address to write
170 * @val: Value to write
171 */
172static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
173 uint32_t val)
174{
175 union cvmx_pescx_cfg_wr pescx_cfg_wr;
176 pescx_cfg_wr.u64 = 0;
177 pescx_cfg_wr.s.addr = cfg_offset;
178 pescx_cfg_wr.s.data = val;
179 cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
180}
181
182/**
183 * Build a PCIe config space request address for a device
184 *
185 * @pcie_port: PCIe port to access
186 * @bus: Sub bus
187 * @dev: Device ID
188 * @fn: Device sub function
189 * @reg: Register to access
190 *
191 * Returns 64bit Octeon IO address
192 */
193static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
194 int dev, int fn, int reg)
195{
196 union cvmx_pcie_address pcie_addr;
197 union cvmx_pciercx_cfg006 pciercx_cfg006;
198
199 pciercx_cfg006.u32 =
200 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
201 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
202 return 0;
203
204 pcie_addr.u64 = 0;
205 pcie_addr.config.upper = 2;
206 pcie_addr.config.io = 1;
207 pcie_addr.config.did = 3;
208 pcie_addr.config.subdid = 1;
209 pcie_addr.config.es = 1;
210 pcie_addr.config.port = pcie_port;
211 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
212 pcie_addr.config.bus = bus;
213 pcie_addr.config.dev = dev;
214 pcie_addr.config.func = fn;
215 pcie_addr.config.reg = reg;
216 return pcie_addr.u64;
217}
218
219/**
220 * Read 8bits from a Device's config space
221 *
222 * @pcie_port: PCIe port the device is on
223 * @bus: Sub bus
224 * @dev: Device ID
225 * @fn: Device sub function
226 * @reg: Register to access
227 *
228 * Returns Result of the read
229 */
230static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
231 int fn, int reg)
232{
233 uint64_t address =
234 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
235 if (address)
236 return cvmx_read64_uint8(address);
237 else
238 return 0xff;
239}
240
241/**
242 * Read 16bits from a Device's config space
243 *
244 * @pcie_port: PCIe port the device is on
245 * @bus: Sub bus
246 * @dev: Device ID
247 * @fn: Device sub function
248 * @reg: Register to access
249 *
250 * Returns Result of the read
251 */
252static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
253 int fn, int reg)
254{
255 uint64_t address =
256 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
257 if (address)
258 return le16_to_cpu(cvmx_read64_uint16(address));
259 else
260 return 0xffff;
261}
262
263/**
264 * Read 32bits from a Device's config space
265 *
266 * @pcie_port: PCIe port the device is on
267 * @bus: Sub bus
268 * @dev: Device ID
269 * @fn: Device sub function
270 * @reg: Register to access
271 *
272 * Returns Result of the read
273 */
274static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
275 int fn, int reg)
276{
277 uint64_t address =
278 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
279 if (address)
280 return le32_to_cpu(cvmx_read64_uint32(address));
281 else
282 return 0xffffffff;
283}
284
285/**
286 * Write 8bits to a Device's config space
287 *
288 * @pcie_port: PCIe port the device is on
289 * @bus: Sub bus
290 * @dev: Device ID
291 * @fn: Device sub function
292 * @reg: Register to access
293 * @val: Value to write
294 */
295static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
296 int reg, uint8_t val)
297{
298 uint64_t address =
299 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
300 if (address)
301 cvmx_write64_uint8(address, val);
302}
303
304/**
305 * Write 16bits to a Device's config space
306 *
307 * @pcie_port: PCIe port the device is on
308 * @bus: Sub bus
309 * @dev: Device ID
310 * @fn: Device sub function
311 * @reg: Register to access
312 * @val: Value to write
313 */
314static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
315 int reg, uint16_t val)
316{
317 uint64_t address =
318 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
319 if (address)
320 cvmx_write64_uint16(address, cpu_to_le16(val));
321}
322
323/**
324 * Write 32bits to a Device's config space
325 *
326 * @pcie_port: PCIe port the device is on
327 * @bus: Sub bus
328 * @dev: Device ID
329 * @fn: Device sub function
330 * @reg: Register to access
331 * @val: Value to write
332 */
333static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn,
334 int reg, uint32_t val)
335{
336 uint64_t address =
337 __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
338 if (address)
339 cvmx_write64_uint32(address, cpu_to_le32(val));
340}
341
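[Illustration only, not in the patch: the pci_ops callbacks later in this file funnel every Linux config access into these helpers. A direct call reading the vendor/device ID dword of the device behind root port 0 would look like the following; bus 1, device 0, function 0 are example values, and 0xffffffff indicates that no device answered or the address was rejected:

	uint32_t id = cvmx_pcie_config_read32(0, 1, 0, 0, 0);
]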
342/**
343 * Initialize the RC config space CSRs
344 *
345 * @pcie_port: PCIe port to initialize
346 */
347static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
348{
349 union cvmx_pciercx_cfg030 pciercx_cfg030;
350 union cvmx_npei_ctl_status2 npei_ctl_status2;
351 union cvmx_pciercx_cfg070 pciercx_cfg070;
352 union cvmx_pciercx_cfg001 pciercx_cfg001;
353 union cvmx_pciercx_cfg032 pciercx_cfg032;
354 union cvmx_pciercx_cfg006 pciercx_cfg006;
355 union cvmx_pciercx_cfg008 pciercx_cfg008;
356 union cvmx_pciercx_cfg009 pciercx_cfg009;
357 union cvmx_pciercx_cfg010 pciercx_cfg010;
358 union cvmx_pciercx_cfg011 pciercx_cfg011;
359 union cvmx_pciercx_cfg035 pciercx_cfg035;
360 union cvmx_pciercx_cfg075 pciercx_cfg075;
361 union cvmx_pciercx_cfg034 pciercx_cfg034;
362
363 /* Max Payload Size (PCIE*_CFG030[MPS]) */
364 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
365 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
366 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
367 pciercx_cfg030.u32 =
368 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
369 /*
370 * Max payload size = 128 bytes for best Octeon DMA
371 * performance.
372 */
373 pciercx_cfg030.s.mps = 0;
374 /*
375 * Max read request size = 128 bytes for best Octeon DMA
376 * performance.
377 */
378 pciercx_cfg030.s.mrrs = 0;
379 /* Enable relaxed ordering. */
380 pciercx_cfg030.s.ro_en = 1;
381 /* Enable no snoop. */
382 pciercx_cfg030.s.ns_en = 1;
383 /* Correctable error reporting enable. */
384 pciercx_cfg030.s.ce_en = 1;
385 /* Non-fatal error reporting enable. */
386 pciercx_cfg030.s.nfe_en = 1;
387 /* Fatal error reporting enable. */
388 pciercx_cfg030.s.fe_en = 1;
389 /* Unsupported request reporting enable. */
390 pciercx_cfg030.s.ur_en = 1;
391 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port),
392 pciercx_cfg030.u32);
393
394 /*
395 * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match
396 * PCIE*_CFG030[MPS]
397 *
398 * Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not
399 * exceed PCIE*_CFG030[MRRS].
400 */
401 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
402 /* Max payload size = 128 bytes for best Octeon DMA performance */
403 npei_ctl_status2.s.mps = 0;
404 /* Max read request size = 128 bytes for best Octeon DMA performance */
405 npei_ctl_status2.s.mrrs = 0;
406 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
407
408 /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
409 pciercx_cfg070.u32 =
410 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
411 pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
412 pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
413 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port),
414 pciercx_cfg070.u32);
415
416 /*
417 * Access Enables (PCIE*_CFG001[MSAE,ME]) ME and MSAE should
418 * always be set.
419 *
420 * Interrupt Disable (PCIE*_CFG001[I_DIS]) System Error
421 * Message Enable (PCIE*_CFG001[SEE])
422 */
423 pciercx_cfg001.u32 =
424 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
425 pciercx_cfg001.s.msae = 1; /* Memory space enable. */
426 pciercx_cfg001.s.me = 1; /* Bus master enable. */
427 pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
428 pciercx_cfg001.s.see = 1; /* SERR# enable */
429 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port),
430 pciercx_cfg001.u32);
431
432 /* Advanced Error Recovery Message Enables */
433 /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
434 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
435 /* Use CVMX_PCIERCX_CFG067 hardware default */
436 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
437
438 /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
439 pciercx_cfg032.u32 =
440 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
441 pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
442 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port),
443 pciercx_cfg032.u32);
444
445 /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */
446
447 /*
448 * Link Width Mode (PCIERCn_CFG452[LME]) - Set during
449 * cvmx_pcie_rc_initialize_link()
450 *
451 * Primary Bus Number (PCIERCn_CFG006[PBNUM])
452 *
453 * We set the primary bus number to 1 so IDT bridges are
454 * happy. They don't like zero.
455 */
456 pciercx_cfg006.u32 = 0;
457 pciercx_cfg006.s.pbnum = 1;
458 pciercx_cfg006.s.sbnum = 1;
459 pciercx_cfg006.s.subbnum = 1;
460 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port),
461 pciercx_cfg006.u32);
462
463 /*
464 * Memory-mapped I/O BAR (PCIERCn_CFG008)
465 * Most applications should disable the memory-mapped I/O BAR by
466 * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR]
467 */
468 pciercx_cfg008.u32 = 0;
469 pciercx_cfg008.s.mb_addr = 0x100;
470 pciercx_cfg008.s.ml_addr = 0;
471 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port),
472 pciercx_cfg008.u32);
473
474 /*
475 * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011)
476 * Most applications should disable the prefetchable BAR by setting
477 * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] <
478 * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE]
479 */
480 pciercx_cfg009.u32 =
481 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
482 pciercx_cfg010.u32 =
483 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
484 pciercx_cfg011.u32 =
485 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
486 pciercx_cfg009.s.lmem_base = 0x100;
487 pciercx_cfg009.s.lmem_limit = 0;
488 pciercx_cfg010.s.umem_base = 0x100;
489 pciercx_cfg011.s.umem_limit = 0;
490 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port),
491 pciercx_cfg009.u32);
492 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port),
493 pciercx_cfg010.u32);
494 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port),
495 pciercx_cfg011.u32);
496
497 /*
498 * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE])
499 * PME Interrupt Enables (PCIERCn_CFG035[PMEIE])
500 */
501 pciercx_cfg035.u32 =
502 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
503 /* System error on correctable error enable. */
504 pciercx_cfg035.s.secee = 1;
505 /* System error on fatal error enable. */
506 pciercx_cfg035.s.sefee = 1;
507 /* System error on non-fatal error enable. */
508 pciercx_cfg035.s.senfee = 1;
509 /* PME interrupt enable. */
510 pciercx_cfg035.s.pmeie = 1;
511 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port),
512 pciercx_cfg035.u32);
513
514 /*
515 * Advanced Error Recovery Interrupt Enables
516 * (PCIERCn_CFG075[CERE,NFERE,FERE])
517 */
518 pciercx_cfg075.u32 =
519 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
520 /* Correctable error reporting enable. */
521 pciercx_cfg075.s.cere = 1;
522 /* Non-fatal error reporting enable. */
523 pciercx_cfg075.s.nfere = 1;
524 /* Fatal error reporting enable. */
525 pciercx_cfg075.s.fere = 1;
526 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port),
527 pciercx_cfg075.u32);
528
529 /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN],
530 * PCIERCn_CFG034[DLLS_EN,CCINT_EN])
531 */
532 pciercx_cfg034.u32 =
533 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
534 /* Hot-plug interrupt enable. */
535 pciercx_cfg034.s.hpint_en = 1;
536 /* Data Link Layer state changed enable */
537 pciercx_cfg034.s.dlls_en = 1;
538 /* Command completed interrupt enable. */
539 pciercx_cfg034.s.ccint_en = 1;
540 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port),
541 pciercx_cfg034.u32);
542}
543
544/**
545 * Initialize a host mode PCIe link. This function takes a PCIe
546 * port from reset to a link up state. Software can then begin
547 * configuring the rest of the link.
548 *
549 * @pcie_port: PCIe port to initialize
550 *
551 * Returns Zero on success
552 */
553static int __cvmx_pcie_rc_initialize_link(int pcie_port)
554{
555 uint64_t start_cycle;
556 union cvmx_pescx_ctl_status pescx_ctl_status;
557 union cvmx_pciercx_cfg452 pciercx_cfg452;
558 union cvmx_pciercx_cfg032 pciercx_cfg032;
559 union cvmx_pciercx_cfg448 pciercx_cfg448;
560
561 /* Set the lane width */
562 pciercx_cfg452.u32 =
563 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
564 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
565 if (pescx_ctl_status.s.qlm_cfg == 0) {
566 /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
567 pciercx_cfg452.s.lme = 0xf;
568 } else {
569 /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
570 pciercx_cfg452.s.lme = 0x7;
571 }
572 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port),
573 pciercx_cfg452.u32);
574
575 /*
576 * CN52XX pass 1.x has an errata where length mismatches on UR
577 * responses can cause bus errors on 64bit memory
578 * reads. Turning off length error checking fixes this.
579 */
580 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
581 union cvmx_pciercx_cfg455 pciercx_cfg455;
582 pciercx_cfg455.u32 =
583 cvmx_pcie_cfgx_read(pcie_port,
584 CVMX_PCIERCX_CFG455(pcie_port));
585 pciercx_cfg455.s.m_cpl_len_err = 1;
586 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port),
587 pciercx_cfg455.u32);
588 }
589
590 /* Lane swap needs to be manually enabled for CN52XX */
591 if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
592 pescx_ctl_status.s.lane_swp = 1;
593 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),
594 pescx_ctl_status.u64);
595 }
596
597 /* Bring up the link */
598 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
599 pescx_ctl_status.s.lnk_enb = 1;
600 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
601
602 /*
603 * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
604 * be disabled.
605 */
606 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
607 __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
608
609 /* Wait for the link to come up */
610 cvmx_dprintf("PCIe: Waiting for port %d link\n", pcie_port);
611 start_cycle = cvmx_get_cycle();
612 do {
613 if (cvmx_get_cycle() - start_cycle >
614 2 * cvmx_sysinfo_get()->cpu_clock_hz) {
615 cvmx_dprintf("PCIe: Port %d link timeout\n",
616 pcie_port);
617 return -1;
618 }
619 cvmx_wait(10000);
620 pciercx_cfg032.u32 =
621 cvmx_pcie_cfgx_read(pcie_port,
622 CVMX_PCIERCX_CFG032(pcie_port));
623 } while (pciercx_cfg032.s.dlla == 0);
624
625 /* Display the link status */
626 cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port,
627 pciercx_cfg032.s.nlw);
628
629 /*
630 * Update the Replay Time Limit. Empirically, some PCIe
631 * devices take a little longer to respond than expected under
632 * load. As a workaround for this we configure the Replay Time
633 * Limit to the value expected for a 512 byte MPS instead of
634 * our actual 256 byte MPS. The numbers below are directly
635 * from the PCIe spec table 3-4.
636 */
637 pciercx_cfg448.u32 =
638 cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
639 switch (pciercx_cfg032.s.nlw) {
640 case 1: /* 1 lane */
641 pciercx_cfg448.s.rtl = 1677;
642 break;
643 case 2: /* 2 lanes */
644 pciercx_cfg448.s.rtl = 867;
645 break;
646 case 4: /* 4 lanes */
647 pciercx_cfg448.s.rtl = 462;
648 break;
649 case 8: /* 8 lanes */
650 pciercx_cfg448.s.rtl = 258;
651 break;
652 }
653 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port),
654 pciercx_cfg448.u32);
655
656 return 0;
657}
658
659/**
660 * Initialize a PCIe port for use in host(RC) mode. It doesn't
661 * enumerate the bus.
662 *
663 * @pcie_port: PCIe port to initialize
664 *
665 * Returns Zero on success
666 */
667static int cvmx_pcie_rc_initialize(int pcie_port)
668{
669 int i;
670 union cvmx_ciu_soft_prst ciu_soft_prst;
671 union cvmx_pescx_bist_status pescx_bist_status;
672 union cvmx_pescx_bist_status2 pescx_bist_status2;
673 union cvmx_npei_ctl_status npei_ctl_status;
674 union cvmx_npei_mem_access_ctl npei_mem_access_ctl;
675 union cvmx_npei_mem_access_subidx mem_access_subid;
676 union cvmx_npei_dbg_data npei_dbg_data;
677 union cvmx_pescx_ctl_status2 pescx_ctl_status2;
678
679 /*
680 * Make sure we aren't trying to setup a target mode interface
681 * in host mode.
682 */
683 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
684 if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
685 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called "
686 "on port0, but port0 is not in host mode\n");
687 return -1;
688 }
689
690 /*
691 * Make sure a CN52XX isn't trying to bring up port 1 when it
692 * is disabled.
693 */
694 if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
695 npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
696 if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
697 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() "
698 "called on port1, but port1 is "
699 "disabled\n");
700 return -1;
701 }
702 }
703
704 /*
705 * PCIe switch arbitration mode. '0' == fixed priority NPEI,
706 * PCIe0, then PCIe1. '1' == round robin.
707 */
708 npei_ctl_status.s.arb = 1;
709 /* Allow up to 0x20 config retries */
710 npei_ctl_status.s.cfg_rtry = 0x20;
711 /*
712 * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS
713 * don't reset.
714 */
715 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
716 npei_ctl_status.s.p0_ntags = 0x20;
717 npei_ctl_status.s.p1_ntags = 0x20;
718 }
719 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
720
721 /* Bring the PCIe out of reset */
722 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) {
723 /*
724 * The EBH5200 board swapped the PCIe reset lines on
725 * the board. As a workaround for this bug, we bring
726 * both PCIe ports out of reset at the same time
727 * instead of on separate calls. So for port 0, we
728 * bring both out of reset and do nothing on port 1.
729 */
730 if (pcie_port == 0) {
731 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
732 /*
733 * After a chip reset the PCIe will also be in
734 * reset. If it isn't, most likely someone is
735 * trying to init it again without a proper
736 * PCIe reset.
737 */
738 if (ciu_soft_prst.s.soft_prst == 0) {
739 /* Reset the ports */
740 ciu_soft_prst.s.soft_prst = 1;
741 cvmx_write_csr(CVMX_CIU_SOFT_PRST,
742 ciu_soft_prst.u64);
743 ciu_soft_prst.u64 =
744 cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
745 ciu_soft_prst.s.soft_prst = 1;
746 cvmx_write_csr(CVMX_CIU_SOFT_PRST1,
747 ciu_soft_prst.u64);
748 /* Wait until pcie resets the ports. */
749 udelay(2000);
750 }
751 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
752 ciu_soft_prst.s.soft_prst = 0;
753 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
754 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
755 ciu_soft_prst.s.soft_prst = 0;
756 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
757 }
758 } else {
759 /*
760 * The normal case: The PCIe ports are completely
761 * separate and can be brought out of reset
762 * independently.
763 */
764 if (pcie_port)
765 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
766 else
767 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
768 /*
769 * After a chip reset the PCIe will also be in
770 * reset. If it isn't, most likely someone is trying
771 * to init it again without a proper PCIe reset.
772 */
773 if (ciu_soft_prst.s.soft_prst == 0) {
774 /* Reset the port */
775 ciu_soft_prst.s.soft_prst = 1;
776 if (pcie_port)
777 cvmx_write_csr(CVMX_CIU_SOFT_PRST1,
778 ciu_soft_prst.u64);
779 else
780 cvmx_write_csr(CVMX_CIU_SOFT_PRST,
781 ciu_soft_prst.u64);
782 /* Wait until pcie resets the ports. */
783 udelay(2000);
784 }
785 if (pcie_port) {
786 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
787 ciu_soft_prst.s.soft_prst = 0;
788 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
789 } else {
790 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
791 ciu_soft_prst.s.soft_prst = 0;
792 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
793 }
794 }
795
796 /*
797 * Wait for PCIe reset to complete. Due to errata PCIE-700, we
798 * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
799 * fixed number of cycles.
800 */
801 cvmx_wait(400000);
802
803 /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
804 CN52XX, so we only probe it on newer chips */
805 if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
806 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
807 /* Clear PCLK_RUN so we can check if the clock is running */
808 pescx_ctl_status2.u64 =
809 cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
810 pescx_ctl_status2.s.pclk_run = 1;
811 cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port),
812 pescx_ctl_status2.u64);
813 /*
814 * Now that we cleared PCLK_RUN, wait for it to be set
815 * again telling us the clock is running.
816 */
817 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
818 union cvmx_pescx_ctl_status2,
819 pclk_run, ==, 1, 10000)) {
820 cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n",
821 pcie_port);
822 return -1;
823 }
824 }
825
826 /*
827 * Check and make sure PCIe came out of reset. If it doesn't
828 * the board probably hasn't wired the clocks up and the
829 * interface should be skipped.
830 */
831 pescx_ctl_status2.u64 =
832 cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
833 if (pescx_ctl_status2.s.pcierst) {
834 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n",
835 pcie_port);
836 return -1;
837 }
838
839 /*
840 * Check BIST2 status. If any bits are set skip this interface. This
841 * is an attempt to catch PCIE-813 on pass 1 parts.
842 */
843 pescx_bist_status2.u64 =
844 cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
845 if (pescx_bist_status2.u64) {
846 cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this "
847 "port isn't hooked up, skipping.\n",
848 pcie_port);
849 return -1;
850 }
851
852 /* Check BIST status */
853 pescx_bist_status.u64 =
854 cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
855 if (pescx_bist_status.u64)
856 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n",
857 pcie_port, CAST64(pescx_bist_status.u64));
858
859 /* Initialize the config space CSRs */
860 __cvmx_pcie_rc_initialize_config_space(pcie_port);
861
862 /* Bring the link up */
863 if (__cvmx_pcie_rc_initialize_link(pcie_port)) {
864 cvmx_dprintf
865 ("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n");
866 return -1;
867 }
868
869 /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
870 npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
871 /* Allow 16 words to combine */
872 npei_mem_access_ctl.s.max_word = 0;
873 /* Wait up to 127 cycles for more data */
874 npei_mem_access_ctl.s.timer = 127;
875 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
876
877 /* Setup Mem access SubDIDs */
878 mem_access_subid.u64 = 0;
879 /* Port the request is sent to. */
880 mem_access_subid.s.port = pcie_port;
881 /* Due to an errata on pass 1 chips, no merging is allowed. */
882 mem_access_subid.s.nmerge = 1;
883 /* Endian-swap for Reads. */
884 mem_access_subid.s.esr = 1;
885 /* Endian-swap for Writes. */
886 mem_access_subid.s.esw = 1;
887 /* No Snoop for Reads. */
888 mem_access_subid.s.nsr = 1;
889 /* No Snoop for Writes. */
890 mem_access_subid.s.nsw = 1;
891 /* Disable Relaxed Ordering for Reads. */
892 mem_access_subid.s.ror = 0;
893 /* Disable Relaxed Ordering for Writes. */
894 mem_access_subid.s.row = 0;
895	/* PCIe Address Bits <63:34>. */
896 mem_access_subid.s.ba = 0;
897
898 /*
899 * Setup mem access 12-15 for port 0, 16-19 for port 1,
900 * supplying 36 bits of address space.
901 */
902 for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
903 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i),
904 mem_access_subid.u64);
905 /* Set each SUBID to extend the addressable range */
906 mem_access_subid.s.ba += 1;
907 }
908
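[Editorial arithmetic note: mem_access_subid.s.ba holds PCIe address bits <63:34>, so each SUBID register decodes a 2^34-byte (16 GB) window; the four SUBIDs per port (12-15 or 16-19) therefore cover 4 * 16 GB = 64 GB, the 36 bits of address space mentioned in the comment above.]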
909 /*
910 * Disable the peer to peer forwarding register. This must be
911	 * set up by the OS after it enumerates the bus and assigns
912 * addresses to the PCIe busses.
913 */
914 for (i = 0; i < 4; i++) {
915 cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
916 cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
917 }
918
919 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
920 cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
921
922 /*
923 * Disable Octeon's BAR1. It isn't needed in RC mode since
924 * BAR2 maps all of memory. BAR2 also maps 256MB-512MB into
925 * the 2nd 256MB of memory.
926 */
927 cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1);
928
929 /*
930 * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take
931 * precedence where they overlap. It also overlaps with the
932 * device addresses, so make sure the peer to peer forwarding
933 * is set right.
934 */
935 cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
936
937 /*
938 * Setup BAR2 attributes
939 *
940 * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
941 * - PTLP_RO,CTLP_RO should normally be set (except for debug).
942 * - WAIT_COM=0 will likely work for all applications.
943 *
944 * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]).
945 */
946 if (pcie_port) {
947 union cvmx_npei_ctl_port1 npei_ctl_port;
948 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
949 npei_ctl_port.s.bar2_enb = 1;
950 npei_ctl_port.s.bar2_esx = 1;
951 npei_ctl_port.s.bar2_cax = 0;
952 npei_ctl_port.s.ptlp_ro = 1;
953 npei_ctl_port.s.ctlp_ro = 1;
954 npei_ctl_port.s.wait_com = 0;
955 npei_ctl_port.s.waitl_com = 0;
956 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
957 } else {
958 union cvmx_npei_ctl_port0 npei_ctl_port;
959 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
960 npei_ctl_port.s.bar2_enb = 1;
961 npei_ctl_port.s.bar2_esx = 1;
962 npei_ctl_port.s.bar2_cax = 0;
963 npei_ctl_port.s.ptlp_ro = 1;
964 npei_ctl_port.s.ctlp_ro = 1;
965 npei_ctl_port.s.wait_com = 0;
966 npei_ctl_port.s.waitl_com = 0;
967 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
968 }
969 return 0;
970}
971
972
973/* Above was cvmx-pcie.c, below original pcie.c */
974
975
976/**
977 * Map a PCI device to the appropriate interrupt line
978 *
979 * @param dev The Linux PCI device structure for the device to map
980 * @param slot The slot number for this device on __BUS 0__. Linux
981 * enumerates through all the bridges and figures out the
982 * slot on Bus 0 where this device eventually hooks to.
983 * @param pin The PCI interrupt pin read from the device, then swizzled
984 * as it goes through each bridge.
985 * @return Interrupt number for the device
986 */
987int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
988 u8 slot, u8 pin)
989{
990 /*
991 * The EBH5600 board with the PCI to PCIe bridge mistakenly
992 * wires the first slot for both device id 2 and interrupt
993	 * A. According to the PCI spec, device id 2 should use INTC. The
994 * following kludge attempts to fix this.
995 */
996 if (strstr(octeon_board_type_string(), "EBH5600") &&
997 dev->bus && dev->bus->parent) {
998 /*
999 * Iterate all the way up the device chain and find
1000 * the root bus.
1001 */
1002 while (dev->bus && dev->bus->parent)
1003 dev = to_pci_dev(dev->bus->bridge);
1004 /* If the root bus is number 0 and the PEX 8114 is the
1005 * root, assume we are behind the miswired bus. We
1006 * need to correct the swizzle level by two. Yuck.
1007 */
1008 if ((dev->bus->number == 0) &&
1009 (dev->vendor == 0x10b5) && (dev->device == 0x8114)) {
1010 /*
1011 * The pin field is one based, not zero. We
1012 * need to swizzle it by minus two.
1013 */
1014 pin = ((pin - 3) & 3) + 1;
1015 }
1016 }
1017 /*
1018 * The -1 is because pin starts with one, not zero. It might
1019 * be that this equation needs to include the slot number, but
1020 * I don't have hardware to check that against.
1021 */
1022 return pin - 1 + OCTEON_IRQ_PCI_INT0;
1023}
1024
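[Worked example of the mapping above, editorial only: a device whose header reports INTA gives pin == 1, so the normal path returns 1 - 1 + OCTEON_IRQ_PCI_INT0 == OCTEON_IRQ_PCI_INT0. Behind the miswired PEX 8114 the pin is first shifted back by two, ((1 - 3) & 3) + 1 == 3 (INTC), which then maps to OCTEON_IRQ_PCI_INT2 and matches the board wiring described in the comment.]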
1025/**
1026 * Read a value from configuration space
1027 *
1028 * @param bus
1029 * @param devfn
1030 * @param reg
1031 * @param size
1032 * @param val
1033 * @return
1034 */
1035static inline int octeon_pcie_read_config(int pcie_port, struct pci_bus *bus,
1036 unsigned int devfn, int reg, int size,
1037 u32 *val)
1038{
1039 union octeon_cvmemctl cvmmemctl;
1040 union octeon_cvmemctl cvmmemctl_save;
1041 int bus_number = bus->number;
1042
1043 /*
1044 * We need to force the bus number to be zero on the root
1045 * bus. Linux numbers the 2nd root bus to start after all
1046 * buses on root 0.
1047 */
1048 if (bus->parent == NULL)
1049 bus_number = 0;
1050
1051 /*
1052 * PCIe only has a single device connected to Octeon. It is
1053 * always device ID 0. Don't bother doing reads for other
1054 * device IDs on the first segment.
1055 */
1056 if ((bus_number == 0) && (devfn >> 3 != 0))
1057 return PCIBIOS_FUNC_NOT_SUPPORTED;
1058
1059 /*
1060 * The following is a workaround for the CN57XX, CN56XX,
1061 * CN55XX, and CN54XX errata with PCIe config reads from non
1062 * existent devices. These chips will hang the PCIe link if a
1063 * config read is performed that causes a UR response.
1064 */
1065 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
1066 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) {
1067 /*
1068 * For our EBH5600 board, port 0 has a bridge with two
1069		 * PCI-X slots. We need special checks to make
1070		 * sure we only probe valid devices. The PCIe->PCI-X
1071		 * bridge only responds to device ID 0, functions
1072		 * 0-1.
1073 */
1074 if ((bus_number == 0) && (devfn >= 2))
1075 return PCIBIOS_FUNC_NOT_SUPPORTED;
1076 /*
1077 * The PCI-X slots are device ID 2,3. Choose one of
1078 * the below "if" blocks based on what is plugged into
1079 * the board.
1080 */
1081#if 1
1082 /* Use this option if you aren't using either slot */
1083 if (bus_number == 1)
1084 return PCIBIOS_FUNC_NOT_SUPPORTED;
1085#elif 0
1086 /*
1087 * Use this option if you are using the first slot but
1088 * not the second.
1089 */
1090 if ((bus_number == 1) && (devfn >> 3 != 2))
1091 return PCIBIOS_FUNC_NOT_SUPPORTED;
1092#elif 0
1093 /*
1094 * Use this option if you are using the second slot
1095 * but not the first.
1096 */
1097 if ((bus_number == 1) && (devfn >> 3 != 3))
1098 return PCIBIOS_FUNC_NOT_SUPPORTED;
1099#elif 0
1100		/* Use this option if you are using both slots */
1101 if ((bus_number == 1) &&
1102 !((devfn == (2 << 3)) || (devfn == (3 << 3))))
1103 return PCIBIOS_FUNC_NOT_SUPPORTED;
1104#endif
1105
1106 /*
1107 * Shorten the DID timeout so bus errors for PCIe
1108 * config reads from non existent devices happen
1109 * faster. This allows us to continue booting even if
1110 * the above "if" checks are wrong. Once one of these
1111 * errors happens, the PCIe port is dead.
1112 */
1113 cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7);
1114 cvmmemctl.u64 = cvmmemctl_save.u64;
1115 cvmmemctl.s.didtto = 2;
1116 __write_64bit_c0_register($11, 7, cvmmemctl.u64);
1117 }
1118
1119 switch (size) {
1120 case 4:
1121 *val = cvmx_pcie_config_read32(pcie_port, bus_number,
1122 devfn >> 3, devfn & 0x7, reg);
1123 break;
1124 case 2:
1125 *val = cvmx_pcie_config_read16(pcie_port, bus_number,
1126 devfn >> 3, devfn & 0x7, reg);
1127 break;
1128 case 1:
1129 *val = cvmx_pcie_config_read8(pcie_port, bus_number, devfn >> 3,
1130 devfn & 0x7, reg);
1131 break;
1132 default:
1133 return PCIBIOS_FUNC_NOT_SUPPORTED;
1134 }
1135
1136 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
1137 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1))
1138 __write_64bit_c0_register($11, 7, cvmmemctl_save.u64);
1139 return PCIBIOS_SUCCESSFUL;
1140}
1141
1142static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn,
1143 int reg, int size, u32 *val)
1144{
1145 return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
1146}
1147
1148static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn,
1149 int reg, int size, u32 *val)
1150{
1151 return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
1152}
1153
1154
1155
1156/**
1157 * Write a value to PCI configuration space
1158 *
1159 * @param bus
1160 * @param devfn
1161 * @param reg
1162 * @param size
1163 * @param val
1164 * @return
1165 */
1166static inline int octeon_pcie_write_config(int pcie_port, struct pci_bus *bus,
1167 unsigned int devfn, int reg,
1168 int size, u32 val)
1169{
1170 int bus_number = bus->number;
1171 /*
1172 * We need to force the bus number to be zero on the root
1173 * bus. Linux numbers the 2nd root bus to start after all
1174 * busses on root 0.
1175 */
1176 if (bus->parent == NULL)
1177 bus_number = 0;
1178
1179 switch (size) {
1180 case 4:
1181 cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3,
1182 devfn & 0x7, reg, val);
1183 return PCIBIOS_SUCCESSFUL;
1184 case 2:
1185 cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3,
1186 devfn & 0x7, reg, val);
1187 return PCIBIOS_SUCCESSFUL;
1188 case 1:
1189 cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3,
1190 devfn & 0x7, reg, val);
1191 return PCIBIOS_SUCCESSFUL;
1192 }
1193#if PCI_CONFIG_SPACE_DELAY
1194 udelay(PCI_CONFIG_SPACE_DELAY);
1195#endif
1196 return PCIBIOS_FUNC_NOT_SUPPORTED;
1197}
1198
1199static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn,
1200 int reg, int size, u32 val)
1201{
1202 return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
1203}
1204
1205static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
1206 int reg, int size, u32 val)
1207{
1208 return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
1209}
1210
1211static struct pci_ops octeon_pcie0_ops = {
1212 octeon_pcie0_read_config,
1213 octeon_pcie0_write_config,
1214};
1215
1216static struct resource octeon_pcie0_mem_resource = {
1217 .name = "Octeon PCIe0 MEM",
1218 .flags = IORESOURCE_MEM,
1219};
1220
1221static struct resource octeon_pcie0_io_resource = {
1222 .name = "Octeon PCIe0 IO",
1223 .flags = IORESOURCE_IO,
1224};
1225
1226static struct pci_controller octeon_pcie0_controller = {
1227 .pci_ops = &octeon_pcie0_ops,
1228 .mem_resource = &octeon_pcie0_mem_resource,
1229 .io_resource = &octeon_pcie0_io_resource,
1230};
1231
1232static struct pci_ops octeon_pcie1_ops = {
1233 octeon_pcie1_read_config,
1234 octeon_pcie1_write_config,
1235};
1236
1237static struct resource octeon_pcie1_mem_resource = {
1238 .name = "Octeon PCIe1 MEM",
1239 .flags = IORESOURCE_MEM,
1240};
1241
1242static struct resource octeon_pcie1_io_resource = {
1243 .name = "Octeon PCIe1 IO",
1244 .flags = IORESOURCE_IO,
1245};
1246
1247static struct pci_controller octeon_pcie1_controller = {
1248 .pci_ops = &octeon_pcie1_ops,
1249 .mem_resource = &octeon_pcie1_mem_resource,
1250 .io_resource = &octeon_pcie1_io_resource,
1251};
1252
1253
1254/**
1255 * Initialize the Octeon PCIe controllers
1256 *
1257 * @return
1258 */
1259static int __init octeon_pcie_setup(void)
1260{
1261 union cvmx_npei_ctl_status npei_ctl_status;
1262 int result;
1263
1264 /* These chips don't have PCIe */
1265 if (!octeon_has_feature(OCTEON_FEATURE_PCIE))
1266 return 0;
1267
1268 /* Point pcibios_map_irq() to the PCIe version of it */
1269 octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq;
1270
1271 /* Use the PCIe based DMA mappings */
1272 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE;
1273
1274 /*
1275 * PCIe I/O range. It is based on port 0 but includes up until
1276 * port 1's end.
1277 */
1278 set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)));
1279 ioport_resource.start = 0;
1280 ioport_resource.end =
1281 cvmx_pcie_get_io_base_address(1) -
1282 cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;
1283
1284 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
1285 if (npei_ctl_status.s.host_mode) {
1286 pr_notice("PCIe: Initializing port 0\n");
1287 result = cvmx_pcie_rc_initialize(0);
1288 if (result == 0) {
1289 /* Memory offsets are physical addresses */
1290 octeon_pcie0_controller.mem_offset =
1291 cvmx_pcie_get_mem_base_address(0);
1292 /* IO offsets are Mips virtual addresses */
1293 octeon_pcie0_controller.io_map_base =
1294 CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address
1295 (0));
1296 octeon_pcie0_controller.io_offset = 0;
1297 /*
1298 * To keep things similar to PCI, we start
1299 * device addresses at the same place as PCI
1300			 * using big bar support. This normally
1301 * translates to 4GB-256MB, which is the same
1302 * as most x86 PCs.
1303 */
1304 octeon_pcie0_controller.mem_resource->start =
1305 cvmx_pcie_get_mem_base_address(0) +
1306 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
1307 octeon_pcie0_controller.mem_resource->end =
1308 cvmx_pcie_get_mem_base_address(0) +
1309 cvmx_pcie_get_mem_size(0) - 1;
1310 /*
1311 * Ports must be above 16KB for the ISA bus
1312 * filtering in the PCI-X to PCI bridge.
1313 */
1314 octeon_pcie0_controller.io_resource->start = 4 << 10;
1315 octeon_pcie0_controller.io_resource->end =
1316 cvmx_pcie_get_io_size(0) - 1;
1317 register_pci_controller(&octeon_pcie0_controller);
1318 }
1319 } else {
1320 pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n");
1321 }
1322
1323 /* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */
1324 if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
1325 union cvmx_npei_dbg_data npei_dbg_data;
1326 npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
1327 if (npei_dbg_data.cn52xx.qlm0_link_width)
1328 return 0;
1329 }
1330
1331 pr_notice("PCIe: Initializing port 1\n");
1332 result = cvmx_pcie_rc_initialize(1);
1333 if (result == 0) {
1334 /* Memory offsets are physical addresses */
1335 octeon_pcie1_controller.mem_offset =
1336 cvmx_pcie_get_mem_base_address(1);
1337 /* IO offsets are Mips virtual addresses */
1338 octeon_pcie1_controller.io_map_base =
1339 CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(1));
1340 octeon_pcie1_controller.io_offset =
1341 cvmx_pcie_get_io_base_address(1) -
1342 cvmx_pcie_get_io_base_address(0);
1343 /*
1344 * To keep things similar to PCI, we start device
1345		 * addresses at the same place as PCI using big bar
1346 * support. This normally translates to 4GB-256MB,
1347 * which is the same as most x86 PCs.
1348 */
1349 octeon_pcie1_controller.mem_resource->start =
1350 cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
1351 (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
1352 octeon_pcie1_controller.mem_resource->end =
1353 cvmx_pcie_get_mem_base_address(1) +
1354 cvmx_pcie_get_mem_size(1) - 1;
1355 /*
1356 * Ports must be above 16KB for the ISA bus filtering
1357 * in the PCI-X to PCI bridge.
1358 */
1359 octeon_pcie1_controller.io_resource->start =
1360 cvmx_pcie_get_io_base_address(1) -
1361 cvmx_pcie_get_io_base_address(0);
1362 octeon_pcie1_controller.io_resource->end =
1363 octeon_pcie1_controller.io_resource->start +
1364 cvmx_pcie_get_io_size(1) - 1;
1365 register_pci_controller(&octeon_pcie1_controller);
1366 }
1367 return 0;
1368}
1369
1370arch_initcall(octeon_pcie_setup);
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-errata.h b/arch/mips/include/asm/octeon/cvmx-helper-errata.h
new file mode 100644
index 000000000000..5fc99189ff58
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-errata.h
@@ -0,0 +1,33 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_HELPER_ERRATA_H__
29#define __CVMX_HELPER_ERRATA_H__
30
31extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm);
32
33#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-jtag.h b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h
new file mode 100644
index 000000000000..29f016ddb895
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h
@@ -0,0 +1,43 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Helper utilities for qlm_jtag.
32 *
33 */
34
35#ifndef __CVMX_HELPER_JTAG_H__
36#define __CVMX_HELPER_JTAG_H__
37
38extern void cvmx_helper_qlm_jtag_init(void);
39extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data);
40extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits);
41extern void cvmx_helper_qlm_jtag_update(int qlm);
42
43#endif /* __CVMX_HELPER_JTAG_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 03fddfa3e928..e31e3fe14f8a 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -376,6 +376,18 @@ static inline uint64_t cvmx_get_cycle(void)
376} 376}
377 377
378/** 378/**
379 * Wait for the specified number of cycles
380 *
381 */
382static inline void cvmx_wait(uint64_t cycles)
383{
384 uint64_t done = cvmx_get_cycle() + cycles;
385
386 while (cvmx_get_cycle() < done)
387 ; /* Spin */
388}
389
390/**
379 * Reads a chip global cycle counter. This counts CPU cycles since 391 * Reads a chip global cycle counter. This counts CPU cycles since
380 * chip reset. The counter is 64 bit. 392 * chip reset. The counter is 64 bit.
381 * This register does not exist on CN38XX pass 1 silicon 393
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index edc676084cda..cac9b1a206fc 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -245,4 +245,6 @@ static inline uint32_t octeon_npi_read32(uint64_t address)
245 return cvmx_read64_uint32(address ^ 4); 245 return cvmx_read64_uint32(address ^ 4);
246} 246}
247 247
248extern struct cvmx_bootinfo *octeon_bootinfo;
249
248#endif /* __ASM_OCTEON_OCTEON_H */ 250#endif /* __ASM_OCTEON_OCTEON_H */