Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--  arch/mips/cavium-octeon/Kconfig                     85
-rw-r--r--  arch/mips/cavium-octeon/Makefile                    16
-rw-r--r--  arch/mips/cavium-octeon/csrc-octeon.c               58
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c                32
-rw-r--r--  arch/mips/cavium-octeon/executive/Makefile          13
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-bootmem.c   586
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c       734
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-sysinfo.c   116
-rw-r--r--  arch/mips/cavium-octeon/executive/octeon-model.c   358
-rw-r--r--  arch/mips/cavium-octeon/flash_setup.c               84
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c               497
-rw-r--r--  arch/mips/cavium-octeon/octeon-memcpy.S            521
-rw-r--r--  arch/mips/cavium-octeon/serial.c                   136
-rw-r--r--  arch/mips/cavium-octeon/setup.c                    927
-rw-r--r--  arch/mips/cavium-octeon/smp.c                      211
15 files changed, 4374 insertions, 0 deletions
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 000000000000..094c17e38e16
--- /dev/null
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,85 @@
1config CAVIUM_OCTEON_SPECIFIC_OPTIONS
2 bool "Enable Octeon specific options"
3 depends on CPU_CAVIUM_OCTEON
4 default "y"
5
6config CAVIUM_OCTEON_2ND_KERNEL
7 bool "Build the kernel to be used as a 2nd kernel on the same chip"
8 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
9 default "n"
10 help
11 This option configures this kernel to be linked at a different
12	  address and use the second UART for output. This allows a kernel built
13 with this option to be run at the same time as one built without this
14 option.
15
16config CAVIUM_OCTEON_HW_FIX_UNALIGNED
17 bool "Enable hardware fixups of unaligned loads and stores"
18 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
19 default "y"
20 help
21 Configure the Octeon hardware to automatically fix unaligned loads
22 and stores. Normally unaligned accesses are fixed using a kernel
23 exception handler. This option enables the hardware automatic fixups,
24	  which require only an extra 3 cycles. Disable this option if you
25 are running code that relies on address exceptions on unaligned
26 accesses.
27
28config CAVIUM_OCTEON_CVMSEG_SIZE
29 int "Number of L1 cache lines reserved for CVMSEG memory"
30 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
31 range 0 54
32 default 1
33 help
34 CVMSEG LM is a segment that accesses portions of the dcache as a
35 local memory; the larger CVMSEG is, the smaller the cache is.
36 This selects the size of CVMSEG LM, which is in cache blocks. The
37	  legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is
38	  between zero and 6912 bytes).
39
40config CAVIUM_OCTEON_LOCK_L2
41 bool "Lock often used kernel code in the L2"
42 depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
43 default "y"
44 help
45 Enable locking parts of the kernel into the L2 cache.
46
47config CAVIUM_OCTEON_LOCK_L2_TLB
48 bool "Lock the TLB handler in L2"
49 depends on CAVIUM_OCTEON_LOCK_L2
50 default "y"
51 help
52 Lock the low level TLB fast path into L2.
53
54config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
55 bool "Lock the exception handler in L2"
56 depends on CAVIUM_OCTEON_LOCK_L2
57 default "y"
58 help
59 Lock the low level exception handler into L2.
60
61config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
62 bool "Lock the interrupt handler in L2"
63 depends on CAVIUM_OCTEON_LOCK_L2
64 default "y"
65 help
66 Lock the low level interrupt handler into L2.
67
68config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
69 bool "Lock the 2nd level interrupt handler in L2"
70 depends on CAVIUM_OCTEON_LOCK_L2
71 default "y"
72 help
73 Lock the 2nd level interrupt handler in L2.
74
75config CAVIUM_OCTEON_LOCK_L2_MEMCPY
76 bool "Lock memcpy() in L2"
77 depends on CAVIUM_OCTEON_LOCK_L2
78 default "y"
79 help
80 Lock the kernel's implementation of memcpy() into L2.
81
82config ARCH_SPARSEMEM_ENABLE
83 def_bool y
84 select SPARSEMEM_STATIC
85 depends on CPU_CAVIUM_OCTEON
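
As a sanity check on the CVMSEG bounds above: Octeon L1 dcache lines are
128 bytes, so the 54-block maximum works out to 6912 bytes. A minimal
illustration in C (constants stated here for the example, not taken from
this patch):

	#define OCTEON_CACHE_LINE_BYTES	128	/* Octeon L1 dcache line size */
	#define CVMSEG_MAX_BLOCKS	54	/* upper bound from the help text */

	/* 54 blocks * 128 bytes/block = 6912 bytes of CVMSEG LM */
	static const int cvmseg_max_bytes =
		CVMSEG_MAX_BLOCKS * OCTEON_CACHE_LINE_BYTES;
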
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 000000000000..1c2a7faf5881
--- /dev/null
+++ b/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,16 @@
1#
2# Makefile for the Cavium Octeon specific kernel interface routines
3# under Linux.
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 2005-2008 Cavium Networks
10#
11
12obj-y := setup.o serial.o octeon-irq.o csrc-octeon.o
13obj-y += dma-octeon.o flash_setup.o
14obj-y += octeon-memcpy.o
15
16obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 000000000000..70fd92c31657
--- /dev/null
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,58 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2007 by Ralf Baechle
7 */
8#include <linux/clocksource.h>
9#include <linux/init.h>
10
11#include <asm/time.h>
12
13#include <asm/octeon/octeon.h>
14#include <asm/octeon/cvmx-ipd-defs.h>
15
16/*
17 * Set the current core's cvmcount counter to the value of the
18 * IPD_CLK_COUNT. We do this on all cores as they are brought
19 * on-line. This allows for a read from a local cpu register to
20 * access a synchronized counter.
21 *
22 */
23void octeon_init_cvmcount(void)
24{
25 unsigned long flags;
26 unsigned loops = 2;
27
28 /* Clobber loops so GCC will not unroll the following while loop. */
29 asm("" : "+r" (loops));
30
31 local_irq_save(flags);
32 /*
33 * Loop several times so we are executing from the cache,
34 * which should give more deterministic timing.
35 */
36 while (loops--)
37 write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
38 local_irq_restore(flags);
39}
40
41static cycle_t octeon_cvmcount_read(void)
42{
43 return read_c0_cvmcount();
44}
45
46static struct clocksource clocksource_mips = {
47 .name = "OCTEON_CVMCOUNT",
48 .read = octeon_cvmcount_read,
49 .mask = CLOCKSOURCE_MASK(64),
50 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
51};
52
53void __init plat_time_init(void)
54{
55 clocksource_mips.rating = 300;
56 clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
57 clocksource_register(&clocksource_mips);
58}
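
For context on what registering this clocksource provides: the timekeeping
core converts raw cvmcount deltas to nanoseconds using the mult/shift pair
that clocksource_set_clock() derives from mips_hpt_frequency. A hedged
sketch of that conversion (a standalone illustration, not code from this
patch):

	#include <stdint.h>

	/* Standard clocksource scaling: ns = (cycles * mult) >> shift */
	static inline uint64_t cycles_to_ns(uint64_t cycles,
					    uint32_t mult, uint32_t shift)
	{
		return (cycles * mult) >> shift;
	}
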
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 000000000000..01b1ef94b361
--- /dev/null
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,32 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
7 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
8 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
9 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
10 * IP32 changes by Ilya.
11 * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
12 * the kernels original.
13 */
14#include <linux/types.h>
15#include <linux/mm.h>
16
17#include <dma-coherence.h>
18
19dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
20{
21	/* Without PCI/PCIe this function can be called for Octeon internal
22	 * devices such as USB. These devices all support 64-bit addressing. */
23 mb();
24 return virt_to_phys(ptr);
25}
26
27void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
28{
29 /* Without PCI/PCIe this function can be called for Octeon internal
30	 * devices such as USB. These devices all support 64-bit addressing. */
31 return;
32}
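
A hedged usage sketch for the mapping above: without PCI/PCIe the DMA
handle is simply the buffer's physical address, so a driver-side helper
reduces to kmalloc() plus octeon_map_dma_mem(). The helper name is
hypothetical:

	#include <linux/slab.h>

	/* Hypothetical helper: allocate a buffer and derive its DMA handle.
	 * On Octeon without PCI this is virt_to_phys() behind a barrier. */
	static dma_addr_t example_prepare_dma_buffer(struct device *dev,
						     size_t size, void **bufp)
	{
		*bufp = kmalloc(size, GFP_KERNEL);
		if (!*bufp)
			return 0;
		return octeon_map_dma_mem(dev, *bufp, size);
	}
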
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 000000000000..80d6cb26766b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the Cavium Octeon specific kernel interface routines
3# under Linux.
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 2005-2008 Cavium Networks
10#
11
12obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
13
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 000000000000..4f5a08b37ccd
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,586 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Simple allocate only memory allocator. Used to allocate memory at
30 * application start time.
31 */
32
33#include <linux/kernel.h>
34
35#include <asm/octeon/cvmx.h>
36#include <asm/octeon/cvmx-spinlock.h>
37#include <asm/octeon/cvmx-bootmem.h>
38
39/*#define DEBUG */
40
41
42static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
43
44/* See header file for descriptions of functions */
45
46/*
47 * Wrapper functions are provided for reading/writing the size and
48 * next block values, as these may not be directly addressable (in
49 * 32-bit applications, for instance). The offsets of data elements
50 * in the bootmem list must match cvmx_bootmem_block_header_t.
51 */
52#define NEXT_OFFSET 0
53#define SIZE_OFFSET 8
54
55static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
56{
57 cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
58}
59
60static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
61{
62 cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
63}
64
65static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
66{
67 return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
68}
69
70static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
71{
72 return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
73}
74
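
The (1ull << 63) OR in the accessors above forms a MIPS64 XKPHYS address,
letting a physical address be dereferenced directly with no TLB mapping.
A one-line illustration (the helper name is ours, not the SDK's):

	/* Bit 63 selects the XKPHYS segment in a 64-bit kernel. */
	static inline uint64_t phys_to_xkphys(uint64_t phys)
	{
		return phys | (1ull << 63);
	}
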
75void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
76 uint64_t min_addr, uint64_t max_addr)
77{
78 int64_t address;
79 address =
80 cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
81
82 if (address > 0)
83 return cvmx_phys_to_ptr(address);
84 else
85 return NULL;
86}
87
88void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
89 uint64_t alignment)
90{
91 return cvmx_bootmem_alloc_range(size, alignment, address,
92 address + size);
93}
94
95void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
96{
97 return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
98}
99
100int cvmx_bootmem_free_named(char *name)
101{
102 return cvmx_bootmem_phy_named_block_free(name, 0);
103}
104
105struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
106{
107 return cvmx_bootmem_phy_named_block_find(name, 0);
108}
109
110void cvmx_bootmem_lock(void)
111{
112 cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
113}
114
115void cvmx_bootmem_unlock(void)
116{
117 cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
118}
119
120int cvmx_bootmem_init(void *mem_desc_ptr)
121{
122 /* Here we set the global pointer to the bootmem descriptor
123 * block. This pointer will be used directly, so we will set
124 * it up to be directly usable by the application. It is set
125 * up as follows for the various runtime/ABI combinations:
126 *
127 * Linux 64 bit: Set XKPHYS bit
128 * Linux 32 bit: use mmap to create mapping, use virtual address
129 * CVMX 64 bit: use physical address directly
130 * CVMX 32 bit: use physical address directly
131 *
132 * Note that the CVMX environment assumes the use of 1-1 TLB
133 * mappings so that the physical addresses can be used
134 * directly
135 */
136 if (!cvmx_bootmem_desc) {
137#if defined(CVMX_ABI_64)
138 /* Set XKPHYS bit */
139 cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
140#else
141 cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
142#endif
143 }
144
145 return 0;
146}
147
148/*
149 * The cvmx_bootmem_phy* functions below return 64-bit physical
150 * addresses, and expose more features than the cvmx_bootmem_*
151 * functions above. These are required for full memory space access
152 * in 32-bit applications, as well as for using some advanced
153 * features. Most applications should not need to use these.
154 */
155
156int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
157 uint64_t address_max, uint64_t alignment,
158 uint32_t flags)
159{
160
161 uint64_t head_addr;
162 uint64_t ent_addr;
163	/* points to the previous list entry; zero when the current entry is the head of the list */
164 uint64_t prev_addr = 0;
165 uint64_t new_ent_addr = 0;
166 uint64_t desired_min_addr;
167
168#ifdef DEBUG
169 cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
170 "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
171 (unsigned long long)req_size,
172 (unsigned long long)address_min,
173 (unsigned long long)address_max,
174 (unsigned long long)alignment);
175#endif
176
177 if (cvmx_bootmem_desc->major_version > 3) {
178 cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
179 "version: %d.%d at addr: %p\n",
180 (int)cvmx_bootmem_desc->major_version,
181 (int)cvmx_bootmem_desc->minor_version,
182 cvmx_bootmem_desc);
183 goto error_out;
184 }
185
186 /*
187 * Do a variety of checks to validate the arguments. The
188 * allocator code will later assume that these checks have
189 * been made. We validate that the requested constraints are
190 * not self-contradictory before we look through the list of
191 * available memory.
192 */
193
194 /* 0 is not a valid req_size for this allocator */
195 if (!req_size)
196 goto error_out;
197
198 /* Round req_size up to mult of minimum alignment bytes */
199 req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
200 ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
201
202 /*
203	 * Convert a non-zero address_min with a zero address_max to the
204	 * special case of a range that specifies an exact memory block
205	 * to allocate. Do this before other checks and adjustments so
206	 * that this transformation will be validated.
207 */
208 if (address_min && !address_max)
209 address_max = address_min + req_size;
210 else if (!address_min && !address_max)
211 address_max = ~0ull; /* If no limits given, use max limits */
212
213
214 /*
215	 * Enforce minimum alignment (this also keeps the minimum free
216	 * block size the same as the alignment size).
217 */
218 if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
219 alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
220
221 /*
222 * Adjust address minimum based on requested alignment (round
223 * up to meet alignment). Do this here so we can reject
224 * impossible requests up front. (NOP for address_min == 0)
225 */
226 if (alignment)
227 address_min = __ALIGN_MASK(address_min, (alignment - 1));
228
229 /*
230 * Reject inconsistent args. We have adjusted these, so this
231 * may fail due to our internal changes even if this check
232 * would pass for the values the user supplied.
233 */
234 if (req_size > address_max - address_min)
235 goto error_out;
236
237 /* Walk through the list entries - first fit found is returned */
238
239 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
240 cvmx_bootmem_lock();
241 head_addr = cvmx_bootmem_desc->head_addr;
242 ent_addr = head_addr;
243 for (; ent_addr;
244 prev_addr = ent_addr,
245 ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
246 uint64_t usable_base, usable_max;
247 uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
248
249 if (cvmx_bootmem_phy_get_next(ent_addr)
250 && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
251 cvmx_dprintf("Internal bootmem_alloc() error: ent: "
252 "0x%llx, next: 0x%llx\n",
253 (unsigned long long)ent_addr,
254 (unsigned long long)
255 cvmx_bootmem_phy_get_next(ent_addr));
256 goto error_out;
257 }
258
259 /*
260		 * Determine if this is an entry that can satisfy the
261		 * request: check to make sure the entry is large enough
262		 * to satisfy the request.
263 */
264 usable_base =
265 __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
266 usable_max = min(address_max, ent_addr + ent_size);
267 /*
268 * We should be able to allocate block at address
269 * usable_base.
270 */
271
272 desired_min_addr = usable_base;
273 /*
274 * Determine if request can be satisfied from the
275 * current entry.
276 */
277 if (!((ent_addr + ent_size) > usable_base
278 && ent_addr < address_max
279 && req_size <= usable_max - usable_base))
280 continue;
281 /*
282 * We have found an entry that has room to satisfy the
283		 * request, so allocate it from this entry. If
284		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
285 * the end of this block rather than the beginning.
286 */
287 if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
288 desired_min_addr = usable_max - req_size;
289 /*
290 * Align desired address down to required
291 * alignment.
292 */
293 desired_min_addr &= ~(alignment - 1);
294 }
295
296 /* Match at start of entry */
297 if (desired_min_addr == ent_addr) {
298 if (req_size < ent_size) {
299 /*
300				 * The entry is big enough to create a new
301				 * block from the top portion of this block.
302 */
303 new_ent_addr = ent_addr + req_size;
304 cvmx_bootmem_phy_set_next(new_ent_addr,
305 cvmx_bootmem_phy_get_next(ent_addr));
306 cvmx_bootmem_phy_set_size(new_ent_addr,
307 ent_size -
308 req_size);
309
310 /*
311 * Adjust next pointer as following
312 * code uses this.
313 */
314 cvmx_bootmem_phy_set_next(ent_addr,
315 new_ent_addr);
316 }
317
318 /*
319 * adjust prev ptr or head to remove this
320 * entry from list.
321 */
322 if (prev_addr)
323 cvmx_bootmem_phy_set_next(prev_addr,
324 cvmx_bootmem_phy_get_next(ent_addr));
325 else
326 /*
327 * head of list being returned, so
328 * update head ptr.
329 */
330 cvmx_bootmem_desc->head_addr =
331 cvmx_bootmem_phy_get_next(ent_addr);
332
333 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
334 cvmx_bootmem_unlock();
335 return desired_min_addr;
336 }
337 /*
338		 * The block being returned doesn't start at the
339		 * beginning of the entry, so we know that we will
340		 * be splitting a block off the front of this one.
341		 *
342		 * Create a new block from the high portion of this
343		 * entry so that the new block starts at the desired
344		 * address; add it to the list and go to the top of
345		 * the loop again to do the actual allocation from
346		 * the new block.
347 */
348 new_ent_addr = desired_min_addr;
349 cvmx_bootmem_phy_set_next(new_ent_addr,
350 cvmx_bootmem_phy_get_next
351 (ent_addr));
352 cvmx_bootmem_phy_set_size(new_ent_addr,
353 cvmx_bootmem_phy_get_size
354 (ent_addr) -
355 (desired_min_addr -
356 ent_addr));
357 cvmx_bootmem_phy_set_size(ent_addr,
358 desired_min_addr - ent_addr);
359 cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
360 /* Loop again to handle actual alloc from new block */
361 }
362error_out:
363 /* We didn't find anything, so return error */
364 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
365 cvmx_bootmem_unlock();
366 return -1;
367}
368
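
A hedged usage sketch for the first-fit allocator above: request 1 MB,
aligned to 128 bytes, anywhere in the first 4 GB of physical memory. A
negative return means no free-list entry satisfied the constraints; all
values below are illustrative:

	static int64_t example_alloc_1mb(void)
	{
		/* args: size, min_addr, max_addr, alignment, flags */
		int64_t phys = cvmx_bootmem_phy_alloc(1ull << 20, 0,
						      1ull << 32, 128, 0);

		return (phys < 0) ? -1 : phys; /* physical address or error */
	}
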
369int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
370{
371 uint64_t cur_addr;
372 uint64_t prev_addr = 0; /* zero is invalid */
373 int retval = 0;
374
375#ifdef DEBUG
376 cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
377 (unsigned long long)phy_addr, (unsigned long long)size);
378#endif
379 if (cvmx_bootmem_desc->major_version > 3) {
380 cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
381 "version: %d.%d at addr: %p\n",
382 (int)cvmx_bootmem_desc->major_version,
383 (int)cvmx_bootmem_desc->minor_version,
384 cvmx_bootmem_desc);
385 return 0;
386 }
387
388 /* 0 is not a valid size for this allocator */
389 if (!size)
390 return 0;
391
392 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
393 cvmx_bootmem_lock();
394 cur_addr = cvmx_bootmem_desc->head_addr;
395 if (cur_addr == 0 || phy_addr < cur_addr) {
396 /* add at front of list - special case with changing head ptr */
397 if (cur_addr && phy_addr + size > cur_addr)
398 goto bootmem_free_done; /* error, overlapping section */
399 else if (phy_addr + size == cur_addr) {
400 /* Add to front of existing first block */
401 cvmx_bootmem_phy_set_next(phy_addr,
402 cvmx_bootmem_phy_get_next
403 (cur_addr));
404 cvmx_bootmem_phy_set_size(phy_addr,
405 cvmx_bootmem_phy_get_size
406 (cur_addr) + size);
407 cvmx_bootmem_desc->head_addr = phy_addr;
408
409 } else {
410 /* New block before first block. OK if cur_addr is 0 */
411 cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
412 cvmx_bootmem_phy_set_size(phy_addr, size);
413 cvmx_bootmem_desc->head_addr = phy_addr;
414 }
415 retval = 1;
416 goto bootmem_free_done;
417 }
418
419 /* Find place in list to add block */
420 while (cur_addr && phy_addr > cur_addr) {
421 prev_addr = cur_addr;
422 cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
423 }
424
425 if (!cur_addr) {
426 /*
427 * We have reached the end of the list, add on to end,
428 * checking to see if we need to combine with last
429 * block
430 */
431 if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
432 phy_addr) {
433 cvmx_bootmem_phy_set_size(prev_addr,
434 cvmx_bootmem_phy_get_size
435 (prev_addr) + size);
436 } else {
437 cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
438 cvmx_bootmem_phy_set_size(phy_addr, size);
439 cvmx_bootmem_phy_set_next(phy_addr, 0);
440 }
441 retval = 1;
442 goto bootmem_free_done;
443 } else {
444 /*
445 * insert between prev and cur nodes, checking for
446 * merge with either/both.
447 */
448 if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
449 phy_addr) {
450 /* Merge with previous */
451 cvmx_bootmem_phy_set_size(prev_addr,
452 cvmx_bootmem_phy_get_size
453 (prev_addr) + size);
454 if (phy_addr + size == cur_addr) {
455 /* Also merge with current */
456 cvmx_bootmem_phy_set_size(prev_addr,
457 cvmx_bootmem_phy_get_size(cur_addr) +
458 cvmx_bootmem_phy_get_size(prev_addr));
459 cvmx_bootmem_phy_set_next(prev_addr,
460 cvmx_bootmem_phy_get_next(cur_addr));
461 }
462 retval = 1;
463 goto bootmem_free_done;
464 } else if (phy_addr + size == cur_addr) {
465 /* Merge with current */
466 cvmx_bootmem_phy_set_size(phy_addr,
467 cvmx_bootmem_phy_get_size
468 (cur_addr) + size);
469 cvmx_bootmem_phy_set_next(phy_addr,
470 cvmx_bootmem_phy_get_next
471 (cur_addr));
472 cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
473 retval = 1;
474 goto bootmem_free_done;
475 }
476
477 /* It is a standalone block, add in between prev and cur */
478 cvmx_bootmem_phy_set_size(phy_addr, size);
479 cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
480 cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
481
482 }
483 retval = 1;
484
485bootmem_free_done:
486 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
487 cvmx_bootmem_unlock();
488 return retval;
489
490}
491
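
A short round-trip sketch pairing the two routines above; the size handed
back to __cvmx_bootmem_phy_free() must match what was allocated, and the
free path coalesces the block with any physically adjacent free neighbors:

	static void example_alloc_free_roundtrip(void)
	{
		int64_t p = cvmx_bootmem_phy_alloc(0x10000, 0, 0, 0x10000, 0);

		if (p > 0)
			__cvmx_bootmem_phy_free(p, 0x10000, 0);
	}
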
492struct cvmx_bootmem_named_block_desc *
493 cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
494{
495 unsigned int i;
496 struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
497
498#ifdef DEBUG
499 cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
500#endif
501 /*
502 * Lock the structure to make sure that it is not being
503 * changed while we are examining it.
504 */
505 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
506 cvmx_bootmem_lock();
507
508 /* Use XKPHYS for 64 bit linux */
509 named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
510 cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
511
512#ifdef DEBUG
513 cvmx_dprintf
514 ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
515 named_block_array_ptr);
516#endif
517 if (cvmx_bootmem_desc->major_version == 3) {
518 for (i = 0;
519 i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
520 if ((name && named_block_array_ptr[i].size
521 && !strncmp(name, named_block_array_ptr[i].name,
522 cvmx_bootmem_desc->named_block_name_len
523 - 1))
524 || (!name && !named_block_array_ptr[i].size)) {
525 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
526 cvmx_bootmem_unlock();
527
528 return &(named_block_array_ptr[i]);
529 }
530 }
531 } else {
532 cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
533 "version: %d.%d at addr: %p\n",
534 (int)cvmx_bootmem_desc->major_version,
535 (int)cvmx_bootmem_desc->minor_version,
536 cvmx_bootmem_desc);
537 }
538 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
539 cvmx_bootmem_unlock();
540
541 return NULL;
542}
543
544int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
545{
546 struct cvmx_bootmem_named_block_desc *named_block_ptr;
547
548 if (cvmx_bootmem_desc->major_version != 3) {
549 cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
550 "%d.%d at addr: %p\n",
551 (int)cvmx_bootmem_desc->major_version,
552 (int)cvmx_bootmem_desc->minor_version,
553 cvmx_bootmem_desc);
554 return 0;
555 }
556#ifdef DEBUG
557 cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
558#endif
559
560 /*
561 * Take lock here, as name lookup/block free/name free need to
562 * be atomic.
563 */
564 cvmx_bootmem_lock();
565
566 named_block_ptr =
567 cvmx_bootmem_phy_named_block_find(name,
568 CVMX_BOOTMEM_FLAG_NO_LOCKING);
569 if (named_block_ptr) {
570#ifdef DEBUG
571 cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
572 "%s, base: 0x%llx, size: 0x%llx\n",
573 name,
574 (unsigned long long)named_block_ptr->base_addr,
575 (unsigned long long)named_block_ptr->size);
576#endif
577 __cvmx_bootmem_phy_free(named_block_ptr->base_addr,
578 named_block_ptr->size,
579 CVMX_BOOTMEM_FLAG_NO_LOCKING);
580 named_block_ptr->size = 0;
581 /* Set size to zero to indicate block not used. */
582 }
583
584 cvmx_bootmem_unlock();
585 return named_block_ptr != NULL; /* 0 on failure, 1 on success */
586}
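
A hedged sketch of the named-block interface this file exports: look up a
region the bootloader reserved by name, then release it. The block name
"u-boot-env" is purely illustrative:

	static void example_release_named_block(void)
	{
		struct cvmx_bootmem_named_block_desc *blk;

		blk = cvmx_bootmem_find_named_block("u-boot-env");
		if (blk)
			cvmx_bootmem_free_named("u-boot-env");
	}
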
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 000000000000..6abe56f1e097
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,734 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Implementation of the Level 2 Cache (L2C) control, measurement, and
30 * debugging facilities.
31 */
32
33#include <asm/octeon/cvmx.h>
34#include <asm/octeon/cvmx-l2c.h>
35#include <asm/octeon/cvmx-spinlock.h>
36
37/*
38 * This spinlock is used internally to ensure that only one core is
39 * performing certain L2 operations at a time.
40 *
41 * NOTE: This only protects calls from within a single application -
42 * if multiple applications or operating systems are running, then it
43 * is up to the user program to coordinate between them.
44 */
45static cvmx_spinlock_t cvmx_l2c_spinlock;
46
47static inline int l2_size_half(void)
48{
49 uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
50 return !!(val & (1ull << 34));
51}
52
53int cvmx_l2c_get_core_way_partition(uint32_t core)
54{
55 uint32_t field;
56
57 /* Validate the core number */
58 if (core >= cvmx_octeon_num_cores())
59 return -1;
60
61 /*
62	 * Use the lower two bits of the core number to determine the
63 * bit offset of the UMSK[] field in the L2C_SPAR register.
64 */
65 field = (core & 0x3) * 8;
66
67 /*
68 * Return the UMSK[] field from the appropriate L2C_SPAR
69	 * register based on the core number.
70 */
71
72 switch (core & 0xC) {
73 case 0x0:
74 return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
75 field;
76 case 0x4:
77 return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
78 field;
79 case 0x8:
80 return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
81 field;
82 case 0xC:
83 return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
84 field;
85 }
86 return 0;
87}
88
89int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
90{
91 uint32_t field;
92 uint32_t valid_mask;
93
94 valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
95
96 mask &= valid_mask;
97
98 /* A UMSK setting which blocks all L2C Ways is an error. */
99 if (mask == valid_mask)
100 return -1;
101
102 /* Validate the core number */
103 if (core >= cvmx_octeon_num_cores())
104 return -1;
105
106 /* Check to make sure current mask & new mask don't block all ways */
107 if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
108 valid_mask)
109 return -1;
110
111 /* Use the lower two bits of core to determine the bit offset of the
112 * UMSK[] field in the L2C_SPAR register.
113 */
114 field = (core & 0x3) * 8;
115
116 /* Assign the new mask setting to the UMSK[] field in the appropriate
117 * L2C_SPAR register based on the core_num.
118 *
119 */
120 switch (core & 0xC) {
121 case 0x0:
122 cvmx_write_csr(CVMX_L2C_SPAR0,
123 (cvmx_read_csr(CVMX_L2C_SPAR0) &
124 ~(0xFF << field)) | mask << field);
125 break;
126 case 0x4:
127 cvmx_write_csr(CVMX_L2C_SPAR1,
128 (cvmx_read_csr(CVMX_L2C_SPAR1) &
129 ~(0xFF << field)) | mask << field);
130 break;
131 case 0x8:
132 cvmx_write_csr(CVMX_L2C_SPAR2,
133 (cvmx_read_csr(CVMX_L2C_SPAR2) &
134 ~(0xFF << field)) | mask << field);
135 break;
136 case 0xC:
137 cvmx_write_csr(CVMX_L2C_SPAR3,
138 (cvmx_read_csr(CVMX_L2C_SPAR3) &
139 ~(0xFF << field)) | mask << field);
140 break;
141 }
142 return 0;
143}
144
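
A hedged usage sketch for the partitioning pair above: read core 0's
current UMSK and additionally block ways 0 and 1 for that core. Both
calls return -1 rather than allow a mask that shuts a core out of every
way:

	static int example_block_ways_0_and_1(void)
	{
		int mask = cvmx_l2c_get_core_way_partition(0);

		if (mask < 0)
			return -1;
		return cvmx_l2c_set_core_way_partition(0, mask | 0x3);
	}
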
145int cvmx_l2c_set_hw_way_partition(uint32_t mask)
146{
147 uint32_t valid_mask;
148
149 valid_mask = 0xff;
150
151 if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
152 if (l2_size_half())
153 valid_mask = 0xf;
154 } else if (l2_size_half())
155 valid_mask = 0x3;
156
157 mask &= valid_mask;
158
159 /* A UMSK setting which blocks all L2C Ways is an error. */
160 if (mask == valid_mask)
161 return -1;
162 /* Check to make sure current mask & new mask don't block all ways */
163 if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
164 valid_mask)
165 return -1;
166
167 cvmx_write_csr(CVMX_L2C_SPAR4,
168 (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
169 return 0;
170}
171
172int cvmx_l2c_get_hw_way_partition(void)
173{
174 return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
175}
176
177void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
178 uint32_t clear_on_read)
179{
180 union cvmx_l2c_pfctl pfctl;
181
182 pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
183
184 switch (counter) {
185 case 0:
186 pfctl.s.cnt0sel = event;
187 pfctl.s.cnt0ena = 1;
188 if (!cvmx_octeon_is_pass1())
189 pfctl.s.cnt0rdclr = clear_on_read;
190 break;
191 case 1:
192 pfctl.s.cnt1sel = event;
193 pfctl.s.cnt1ena = 1;
194 if (!cvmx_octeon_is_pass1())
195 pfctl.s.cnt1rdclr = clear_on_read;
196 break;
197 case 2:
198 pfctl.s.cnt2sel = event;
199 pfctl.s.cnt2ena = 1;
200 if (!cvmx_octeon_is_pass1())
201 pfctl.s.cnt2rdclr = clear_on_read;
202 break;
203 case 3:
204 default:
205 pfctl.s.cnt3sel = event;
206 pfctl.s.cnt3ena = 1;
207 if (!cvmx_octeon_is_pass1())
208 pfctl.s.cnt3rdclr = clear_on_read;
209 break;
210 }
211
212 cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
213}
214
215uint64_t cvmx_l2c_read_perf(uint32_t counter)
216{
217 switch (counter) {
218 case 0:
219 return cvmx_read_csr(CVMX_L2C_PFC0);
220 case 1:
221 return cvmx_read_csr(CVMX_L2C_PFC1);
222 case 2:
223 return cvmx_read_csr(CVMX_L2C_PFC2);
224 case 3:
225 default:
226 return cvmx_read_csr(CVMX_L2C_PFC3);
227 }
228}
229
230/**
231 * @INTERNAL
232 * Helper function used to fault in cache lines for L2 cache locking.
233 *
234 * @addr: Address of base of memory region to read into L2 cache
235 * @len: Length (in bytes) of region to fault in
236 */
237static void fault_in(uint64_t addr, int len)
238{
239 volatile char *ptr;
240 volatile char dummy;
241 /*
242 * Adjust addr and length so we get all cache lines even for
243 * small ranges spanning two cache lines
244 */
245 len += addr & CVMX_CACHE_LINE_MASK;
246 addr &= ~CVMX_CACHE_LINE_MASK;
247 ptr = (volatile char *)cvmx_phys_to_ptr(addr);
248 /*
249 * Invalidate L1 cache to make sure all loads result in data
250 * being in L2.
251 */
252 CVMX_DCACHE_INVALIDATE;
253 while (len > 0) {
254 dummy += *ptr;
255 len -= CVMX_CACHE_LINE_SIZE;
256 ptr += CVMX_CACHE_LINE_SIZE;
257 }
258}
259
260int cvmx_l2c_lock_line(uint64_t addr)
261{
262 int retval = 0;
263 union cvmx_l2c_dbg l2cdbg;
264 union cvmx_l2c_lckbase lckbase;
265 union cvmx_l2c_lckoff lckoff;
266 union cvmx_l2t_err l2t_err;
267 l2cdbg.u64 = 0;
268 lckbase.u64 = 0;
269 lckoff.u64 = 0;
270
271 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
272
273 /* Clear l2t error bits if set */
274 l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
275 l2t_err.s.lckerr = 1;
276 l2t_err.s.lckerr2 = 1;
277 cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
278
279 addr &= ~CVMX_CACHE_LINE_MASK;
280
281 /* Set this core as debug core */
282 l2cdbg.s.ppnum = cvmx_get_core_num();
283 CVMX_SYNC;
284 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
285 cvmx_read_csr(CVMX_L2C_DBG);
286
287 lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
288 cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
289 cvmx_read_csr(CVMX_L2C_LCKOFF);
290
291 if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
292 int alias_shift =
293 CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
294 uint64_t addr_tmp =
295 addr ^ (addr & ((1 << alias_shift) - 1)) >>
296 CVMX_L2_SET_BITS;
297 lckbase.s.lck_base = addr_tmp >> 7;
298 } else {
299 lckbase.s.lck_base = addr >> 7;
300 }
301
302 lckbase.s.lck_ena = 1;
303 cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
304 cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
305
306 fault_in(addr, CVMX_CACHE_LINE_SIZE);
307
308 lckbase.s.lck_ena = 0;
309 cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
310 cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
311
312 /* Stop being debug core */
313 cvmx_write_csr(CVMX_L2C_DBG, 0);
314 cvmx_read_csr(CVMX_L2C_DBG);
315
316 l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
317 if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
318 retval = 1; /* We were unable to lock the line */
319
320 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
321
322 return retval;
323}
324
325int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
326{
327 int retval = 0;
328
329 /* Round start/end to cache line boundaries */
330 len += start & CVMX_CACHE_LINE_MASK;
331 start &= ~CVMX_CACHE_LINE_MASK;
332 len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
333
334 while (len) {
335 retval += cvmx_l2c_lock_line(start);
336 start += CVMX_CACHE_LINE_SIZE;
337 len -= CVMX_CACHE_LINE_SIZE;
338 }
339
340 return retval;
341}
342
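
A hedged sketch of locking a hot code region into L2, in the spirit of
the CAVIUM_OCTEON_LOCK_L2_* options; the section symbols are hypothetical
and cvmx_ptr_to_phys() is assumed to be available from the SDK headers:

	extern char my_hot_code_start[], my_hot_code_end[]; /* hypothetical */

	static int example_lock_hot_code(void)
	{
		/* Returns the number of cache lines that failed to lock. */
		return cvmx_l2c_lock_mem_region(
			cvmx_ptr_to_phys(my_hot_code_start),
			my_hot_code_end - my_hot_code_start);
	}
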
343void cvmx_l2c_flush(void)
344{
345 uint64_t assoc, set;
346 uint64_t n_assoc, n_set;
347 union cvmx_l2c_dbg l2cdbg;
348
349 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
350
351 l2cdbg.u64 = 0;
352 if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
353 l2cdbg.s.ppnum = cvmx_get_core_num();
354 l2cdbg.s.finv = 1;
355 n_set = CVMX_L2_SETS;
356 n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
357 for (set = 0; set < n_set; set++) {
358 for (assoc = 0; assoc < n_assoc; assoc++) {
359 l2cdbg.s.set = assoc;
360			/* Enter debug mode, and make sure all other
361			 * writes complete before we enter debug
362			 * mode. */
363 CVMX_SYNCW;
364 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
365 cvmx_read_csr(CVMX_L2C_DBG);
366
367 CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
368 (CVMX_MIPS_SPACE_XKPHYS,
369 set * CVMX_CACHE_LINE_SIZE), 0);
370 CVMX_SYNCW; /* Push STF out to L2 */
371 /* Exit debug mode */
372 CVMX_SYNC;
373 cvmx_write_csr(CVMX_L2C_DBG, 0);
374 cvmx_read_csr(CVMX_L2C_DBG);
375 }
376 }
377
378 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
379}
380
381int cvmx_l2c_unlock_line(uint64_t address)
382{
383 int assoc;
384 union cvmx_l2c_tag tag;
385 union cvmx_l2c_dbg l2cdbg;
386 uint32_t tag_addr;
387
388 uint32_t index = cvmx_l2c_address_to_index(address);
389
390 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
391 /* Compute portion of address that is stored in tag */
392 tag_addr =
393 ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
394 ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
395 for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
396 tag = cvmx_get_l2c_tag(assoc, index);
397
398 if (tag.s.V && (tag.s.addr == tag_addr)) {
399 l2cdbg.u64 = 0;
400 l2cdbg.s.ppnum = cvmx_get_core_num();
401 l2cdbg.s.set = assoc;
402 l2cdbg.s.finv = 1;
403
404 CVMX_SYNC;
405 /* Enter debug mode */
406 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
407 cvmx_read_csr(CVMX_L2C_DBG);
408
409 CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
410 (CVMX_MIPS_SPACE_XKPHYS,
411 address), 0);
412 CVMX_SYNC;
413 /* Exit debug mode */
414 cvmx_write_csr(CVMX_L2C_DBG, 0);
415 cvmx_read_csr(CVMX_L2C_DBG);
416 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
417 return tag.s.L;
418 }
419 }
420 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
421 return 0;
422}
423
424int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
425{
426 int num_unlocked = 0;
427 /* Round start/end to cache line boundaries */
428 len += start & CVMX_CACHE_LINE_MASK;
429 start &= ~CVMX_CACHE_LINE_MASK;
430 len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
431 while (len > 0) {
432 num_unlocked += cvmx_l2c_unlock_line(start);
433 start += CVMX_CACHE_LINE_SIZE;
434 len -= CVMX_CACHE_LINE_SIZE;
435 }
436
437 return num_unlocked;
438}
439
440/*
441 * Internal l2c tag types. These are converted to a generic structure
442 * that can be used on all chips.
443 */
444union __cvmx_l2c_tag {
445 uint64_t u64;
446 struct cvmx_l2c_tag_cn50xx {
447 uint64_t reserved:40;
448 uint64_t V:1; /* Line valid */
449 uint64_t D:1; /* Line dirty */
450 uint64_t L:1; /* Line locked */
451 uint64_t U:1; /* Use, LRU eviction */
452 uint64_t addr:20; /* Phys mem addr (33..14) */
453 } cn50xx;
454 struct cvmx_l2c_tag_cn30xx {
455 uint64_t reserved:41;
456 uint64_t V:1; /* Line valid */
457 uint64_t D:1; /* Line dirty */
458 uint64_t L:1; /* Line locked */
459 uint64_t U:1; /* Use, LRU eviction */
460 uint64_t addr:19; /* Phys mem addr (33..15) */
461 } cn30xx;
462 struct cvmx_l2c_tag_cn31xx {
463 uint64_t reserved:42;
464 uint64_t V:1; /* Line valid */
465 uint64_t D:1; /* Line dirty */
466 uint64_t L:1; /* Line locked */
467 uint64_t U:1; /* Use, LRU eviction */
468 uint64_t addr:18; /* Phys mem addr (33..16) */
469 } cn31xx;
470 struct cvmx_l2c_tag_cn38xx {
471 uint64_t reserved:43;
472 uint64_t V:1; /* Line valid */
473 uint64_t D:1; /* Line dirty */
474 uint64_t L:1; /* Line locked */
475 uint64_t U:1; /* Use, LRU eviction */
476 uint64_t addr:17; /* Phys mem addr (33..17) */
477 } cn38xx;
478 struct cvmx_l2c_tag_cn58xx {
479 uint64_t reserved:44;
480 uint64_t V:1; /* Line valid */
481 uint64_t D:1; /* Line dirty */
482 uint64_t L:1; /* Line locked */
483 uint64_t U:1; /* Use, LRU eviction */
484 uint64_t addr:16; /* Phys mem addr (33..18) */
485 } cn58xx;
486 struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
487 struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
488};
489
490/**
491 * @INTERNAL
492 * Function to read an L2C tag. This code makes the current core
493 * the 'debug core' for the L2. This code must only be executed by
494 * one core at a time.
495 *
496 * @assoc: Association (way) of the tag to dump
497 * @index: Index of the cacheline
498 *
499 * Returns the Octeon model-specific tag structure. This is
500 * translated by a wrapper function to a generic form that is
501 * easier for applications to use.
502 */
503static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
504{
505
506 uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
507 uint64_t core = cvmx_get_core_num();
508 union __cvmx_l2c_tag tag_val;
509 uint64_t dbg_addr = CVMX_L2C_DBG;
510 unsigned long flags;
511
512 union cvmx_l2c_dbg debug_val;
513 debug_val.u64 = 0;
514 /*
515 * For low core count parts, the core number is always small enough
516 * to stay in the correct field and not set any reserved bits.
517 */
518 debug_val.s.ppnum = core;
519 debug_val.s.l2t = 1;
520 debug_val.s.set = assoc;
521 /*
522 * Make sure core is quiet (no prefetches, etc.) before
523 * entering debug mode.
524 */
525 CVMX_SYNC;
526 /* Flush L1 to make sure debug load misses L1 */
527 CVMX_DCACHE_INVALIDATE;
528
529 local_irq_save(flags);
530
531 /*
532 * The following must be done in assembly as when in debug
533 * mode all data loads from L2 return special debug data, not
534 * normal memory contents. Also, interrupts must be
535 * disabled, since if an interrupt occurs while in debug mode
536 * the ISR will get debug data from all its memory reads
537 * instead of the contents of memory
538 */
539
540 asm volatile (".set push \n"
541 " .set mips64 \n"
542 " .set noreorder \n"
543 /* Enter debug mode, wait for store */
544 " sd %[dbg_val], 0(%[dbg_addr]) \n"
545 " ld $0, 0(%[dbg_addr]) \n"
546 /* Read L2C tag data */
547 " ld %[tag_val], 0(%[tag_addr]) \n"
548 /* Exit debug mode, wait for store */
549 " sd $0, 0(%[dbg_addr]) \n"
550 " ld $0, 0(%[dbg_addr]) \n"
551 /* Invalidate dcache to discard debug data */
552 " cache 9, 0($0) \n"
553 " .set pop" :
554 [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
555 [dbg_val] "r"(debug_val.u64),
556 [tag_addr] "r"(debug_tag_addr) : "memory");
557
558 local_irq_restore(flags);
559 return tag_val;
560
561}
562
563union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
564{
565 union __cvmx_l2c_tag tmp_tag;
566 union cvmx_l2c_tag tag;
567 tag.u64 = 0;
568
569 if ((int)association >= cvmx_l2c_get_num_assoc()) {
570 cvmx_dprintf
571 ("ERROR: cvmx_get_l2c_tag association out of range\n");
572 return tag;
573 }
574 if ((int)index >= cvmx_l2c_get_num_sets()) {
575 cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
576			     "index out of range (arg: %d, max: %d)\n",
577 index, cvmx_l2c_get_num_sets());
578 return tag;
579 }
580 /* __read_l2_tag is intended for internal use only */
581 tmp_tag = __read_l2_tag(association, index);
582
583 /*
584 * Convert all tag structure types to generic version, as it
585 * can represent all models.
586 */
587 if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
588 tag.s.V = tmp_tag.cn58xx.V;
589 tag.s.D = tmp_tag.cn58xx.D;
590 tag.s.L = tmp_tag.cn58xx.L;
591 tag.s.U = tmp_tag.cn58xx.U;
592 tag.s.addr = tmp_tag.cn58xx.addr;
593 } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
594 tag.s.V = tmp_tag.cn38xx.V;
595 tag.s.D = tmp_tag.cn38xx.D;
596 tag.s.L = tmp_tag.cn38xx.L;
597 tag.s.U = tmp_tag.cn38xx.U;
598 tag.s.addr = tmp_tag.cn38xx.addr;
599 } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
600 || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
601 tag.s.V = tmp_tag.cn31xx.V;
602 tag.s.D = tmp_tag.cn31xx.D;
603 tag.s.L = tmp_tag.cn31xx.L;
604 tag.s.U = tmp_tag.cn31xx.U;
605 tag.s.addr = tmp_tag.cn31xx.addr;
606 } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
607 tag.s.V = tmp_tag.cn30xx.V;
608 tag.s.D = tmp_tag.cn30xx.D;
609 tag.s.L = tmp_tag.cn30xx.L;
610 tag.s.U = tmp_tag.cn30xx.U;
611 tag.s.addr = tmp_tag.cn30xx.addr;
612 } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
613 tag.s.V = tmp_tag.cn50xx.V;
614 tag.s.D = tmp_tag.cn50xx.D;
615 tag.s.L = tmp_tag.cn50xx.L;
616 tag.s.U = tmp_tag.cn50xx.U;
617 tag.s.addr = tmp_tag.cn50xx.addr;
618 } else {
619 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
620 }
621
622 return tag;
623}
624
625uint32_t cvmx_l2c_address_to_index(uint64_t addr)
626{
627 uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
628 union cvmx_l2c_cfg l2c_cfg;
629 l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
630
631 if (l2c_cfg.s.idxalias) {
632 idx ^=
633 ((addr & CVMX_L2C_ALIAS_MASK) >>
634 CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
635 }
636 idx &= CVMX_L2C_IDX_MASK;
637 return idx;
638}
639
640int cvmx_l2c_get_cache_size_bytes(void)
641{
642 return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
643 CVMX_CACHE_LINE_SIZE;
644}
645
646/**
647 * Return log base 2 of the number of sets in the L2 cache.
648 * Returns the set bit count.
649 */
650int cvmx_l2c_get_set_bits(void)
651{
652 int l2_set_bits;
653 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
654 l2_set_bits = 11; /* 2048 sets */
655 else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
656 l2_set_bits = 10; /* 1024 sets */
657 else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
658 || OCTEON_IS_MODEL(OCTEON_CN52XX))
659 l2_set_bits = 9; /* 512 sets */
660 else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
661 l2_set_bits = 8; /* 256 sets */
662 else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
663 l2_set_bits = 7; /* 128 sets */
664 else {
665 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
666 l2_set_bits = 11; /* 2048 sets */
667 }
668 return l2_set_bits;
669
670}
671
672/* Return the number of sets in the L2 Cache */
673int cvmx_l2c_get_num_sets(void)
674{
675 return 1 << cvmx_l2c_get_set_bits();
676}
677
678/* Return the number of associations in the L2 Cache */
679int cvmx_l2c_get_num_assoc(void)
680{
681 int l2_assoc;
682 if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
683 OCTEON_IS_MODEL(OCTEON_CN52XX) ||
684 OCTEON_IS_MODEL(OCTEON_CN58XX) ||
685 OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
686 l2_assoc = 8;
687 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
688 OCTEON_IS_MODEL(OCTEON_CN30XX))
689 l2_assoc = 4;
690 else {
691 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
692 l2_assoc = 8;
693 }
694
695 /* Check to see if part of the cache is disabled */
696 if (cvmx_fuse_read(265))
697 l2_assoc = l2_assoc >> 2;
698 else if (cvmx_fuse_read(264))
699 l2_assoc = l2_assoc >> 1;
700
701 return l2_assoc;
702}
703
704/**
705 * Flush a line from the L2 cache
706 * This should only be called from one core at a time, as this routine
707 * sets the core to the 'debug' core in order to flush the line.
708 *
709 * @assoc: Association (or way) to flush
710 * @index: Index to flush
711 */
712void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
713{
714 union cvmx_l2c_dbg l2cdbg;
715
716 l2cdbg.u64 = 0;
717 l2cdbg.s.ppnum = cvmx_get_core_num();
718 l2cdbg.s.finv = 1;
719
720 l2cdbg.s.set = assoc;
721 /*
722 * Enter debug mode, and make sure all other writes complete
723 * before we enter debug mode.
724 */
725 asm volatile ("sync" : : : "memory");
726 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
727 cvmx_read_csr(CVMX_L2C_DBG);
728
729 CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
730 /* Exit debug mode */
731 asm volatile ("sync" : : : "memory");
732 cvmx_write_csr(CVMX_L2C_DBG, 0);
733 cvmx_read_csr(CVMX_L2C_DBG);
734}
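
A small sketch tying together the geometry helpers at the end of this
file, e.g. for a boot-time banner (printk usage assumed, not part of this
patch):

	static void example_report_l2_geometry(void)
	{
		printk(KERN_INFO
		       "L2: %d KB (%d sets x %d ways x %d byte lines)\n",
		       cvmx_l2c_get_cache_size_bytes() >> 10,
		       cvmx_l2c_get_num_sets(),
		       cvmx_l2c_get_num_assoc(),
		       CVMX_CACHE_LINE_SIZE);
	}
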
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 000000000000..4812370706a1
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,116 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * This module provides system/board/application information obtained
30 * by the bootloader.
31 */
32
33#include <asm/octeon/cvmx.h>
34#include <asm/octeon/cvmx-spinlock.h>
35#include <asm/octeon/cvmx-sysinfo.h>
36
37/**
38 * This structure defines the private state maintained by the sysinfo module.
39 *
40 */
41static struct {
42 struct cvmx_sysinfo sysinfo; /* system information */
43 cvmx_spinlock_t lock; /* mutex spinlock */
44
45} state = {
46 .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
47};
48
49
50/*
51 * Global variables that define the min/max of the memory region set
52 * up for 32 bit userspace access.
53 */
54uint64_t linux_mem32_min;
55uint64_t linux_mem32_max;
56uint64_t linux_mem32_wired;
57uint64_t linux_mem32_offset;
58
59/**
60 * This function returns the application information as obtained
61 * by the bootloader. This provides the core mask of the cores
62 * running the same application image, as well as the physical
63 * memory regions available to the core.
64 *
65 * Returns Pointer to the boot information structure
66 *
67 */
68struct cvmx_sysinfo *cvmx_sysinfo_get(void)
69{
70 return &(state.sysinfo);
71}
72
73/**
74 * This function is used in non-simple executive environments (such as
75 * Linux kernel, u-boot, etc.) to configure the minimal fields that
76 * are required to use simple executive files directly.
77 *
78 * Locking (if required) must be handled outside of this
79 * function
80 *
81 * @phy_mem_desc_ptr:
82 * Pointer to global physical memory descriptor
83 *	(bootmem descriptor)
84 * @board_type: Octeon board type enumeration
85 *
86 * @board_rev_major:
87 * Board major revision
88 * @board_rev_minor:
89 * Board minor revision
90 * @cpu_clock_hz:
91 *	CPU clock frequency in hertz
92 *
93 * Returns 0: Failure
94 * 1: success
95 */
96int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
97 uint16_t board_type,
98 uint8_t board_rev_major,
99 uint8_t board_rev_minor,
100 uint32_t cpu_clock_hz)
101{
102
103 /* The sysinfo structure was already initialized */
104 if (state.sysinfo.board_type)
105 return 0;
106
107 memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
108 state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
109 state.sysinfo.board_type = board_type;
110 state.sysinfo.board_rev_major = board_rev_major;
111 state.sysinfo.board_rev_minor = board_rev_minor;
112 state.sysinfo.cpu_clock_hz = cpu_clock_hz;
113
114 return 1;
115}
116
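
A hedged sketch of how a host environment might seed this module before
any executive code runs; every argument value below is illustrative:

	static void example_seed_sysinfo(void *bootmem_desc)
	{
		/* board type 0, rev 1.0, 500 MHz core clock -- all made up */
		cvmx_sysinfo_minimal_initialize(bootmem_desc, 0, 1, 0,
						500000000);

		/* Later callers share the same state via the accessor. */
		cvmx_sysinfo_get();
	}
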
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 000000000000..9afc3794ed1b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,358 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * File defining functions for working with different Octeon
30 * models.
31 */
32#include <asm/octeon/octeon.h>
33
34/**
35 * Given the chip processor ID from COP0, this function returns a
36 * string representing the chip model number. The string is of the
37 * form CNXXXXpX.X-FREQ-SUFFIX.
38 * - XXXX = The chip model number
39 * - X.X = Chip pass number
40 * - FREQ = Current frequency in MHz
41 * - SUFFIX = NSP, EXP, SCP, SSP, or CP
42 *
43 * @chip_id: Chip ID
44 *
45 * Returns Model string
46 */
47const char *octeon_model_get_string(uint32_t chip_id)
48{
49 static char buffer[32];
50 return octeon_model_get_string_buffer(chip_id, buffer);
51}
52
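
A hedged usage sketch: the chip ID normally comes from the CP0 PRId
register, and the default pass decoding used later in this file splits it
as bits <5:3> (major, plus one) and <2:0> (minor); families with
exceptions are patched up below:

	#include <asm/mipsregs.h>

	static void example_decode_model(void)
	{
		uint32_t chip_id = read_c0_prid();
		unsigned pass_major = ((chip_id >> 3) & 7) + 1;
		unsigned pass_minor = chip_id & 7;

		/* Yields a string such as "CN3860p2.X-500-EXP"
		 * (illustrative). */
		octeon_model_get_string(chip_id);
		(void)pass_major;
		(void)pass_minor;
	}
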
53/*
54 * Version of octeon_model_get_string() that takes the buffer as an
55 * argument, as static/global variables don't work when running early
56 * in u-boot from flash.
57 */
58const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
59{
60 const char *family;
61 const char *core_model;
62 char pass[4];
63 int clock_mhz;
64 const char *suffix;
65 union cvmx_l2d_fus3 fus3;
66 int num_cores;
67 union cvmx_mio_fus_dat2 fus_dat2;
68 union cvmx_mio_fus_dat3 fus_dat3;
69 char fuse_model[10];
70 uint32_t fuse_data = 0;
71
72 fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
73 fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
74 fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
75
76 num_cores = cvmx_octeon_num_cores();
77
78	/* Make sure the non-existent devices look disabled */
79 switch ((chip_id >> 8) & 0xff) {
80 case 6: /* CN50XX */
81 case 2: /* CN30XX */
82 fus_dat3.s.nodfa_dte = 1;
83 fus_dat3.s.nozip = 1;
84 break;
85 case 4: /* CN57XX or CN56XX */
86 fus_dat3.s.nodfa_dte = 1;
87 break;
88 default:
89 break;
90 }
91
92 /* Make a guess at the suffix */
93 /* NSP = everything */
94 /* EXP = No crypto */
95 /* SCP = No DFA, No zip */
96 /* CP = No DFA, No crypto, No zip */
97 if (fus_dat3.s.nodfa_dte) {
98 if (fus_dat2.s.nocrypto)
99 suffix = "CP";
100 else
101 suffix = "SCP";
102 } else if (fus_dat2.s.nocrypto)
103 suffix = "EXP";
104 else
105 suffix = "NSP";
106
107 /*
108 * Assume pass number is encoded using <5:3><2:0>. Exceptions
109 * will be fixed later.
110 */
111 sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
112
113 /*
114 * Use the number of cores to determine the last 2 digits of
115 * the model number. There are some exceptions that are fixed
116 * later.
117 */
118 switch (num_cores) {
119 case 16:
120 core_model = "60";
121 break;
122 case 15:
123 core_model = "58";
124 break;
125 case 14:
126 core_model = "55";
127 break;
128 case 13:
129 core_model = "52";
130 break;
131 case 12:
132 core_model = "50";
133 break;
134 case 11:
135 core_model = "48";
136 break;
137 case 10:
138 core_model = "45";
139 break;
140 case 9:
141 core_model = "42";
142 break;
143 case 8:
144 core_model = "40";
145 break;
146 case 7:
147 core_model = "38";
148 break;
149 case 6:
150 core_model = "34";
151 break;
152 case 5:
153 core_model = "32";
154 break;
155 case 4:
156 core_model = "30";
157 break;
158 case 3:
159 core_model = "25";
160 break;
161 case 2:
162 core_model = "20";
163 break;
164 case 1:
165 core_model = "10";
166 break;
167 default:
168 core_model = "XX";
169 break;
170 }
171
172 /* Now figure out the family, the first two digits */
173 switch ((chip_id >> 8) & 0xff) {
174 case 0: /* CN38XX, CN37XX or CN36XX */
175 if (fus3.cn38xx.crip_512k) {
176 /*
177 * For some unknown reason, the 16 core one is
178 * called 37 instead of 36.
179 */
180 if (num_cores >= 16)
181 family = "37";
182 else
183 family = "36";
184 } else
185 family = "38";
186 /*
187 * This series of chips didn't follow the standard
188 * pass numbering.
189 */
190 switch (chip_id & 0xf) {
191 case 0:
192 strcpy(pass, "1.X");
193 break;
194 case 1:
195 strcpy(pass, "2.X");
196 break;
197 case 3:
198 strcpy(pass, "3.X");
199 break;
200 default:
201 strcpy(pass, "X.X");
202 break;
203 }
204 break;
205 case 1: /* CN31XX or CN3020 */
206 if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
207 family = "30";
208 else
209 family = "31";
210 /*
211 * This series of chips didn't follow the standard
212 * pass numbering.
213 */
214 switch (chip_id & 0xf) {
215 case 0:
216 strcpy(pass, "1.0");
217 break;
218 case 2:
219 strcpy(pass, "1.1");
220 break;
221 default:
222 strcpy(pass, "X.X");
223 break;
224 }
225 break;
226 case 2: /* CN3010 or CN3005 */
227 family = "30";
228 /* A chip with half cache is an 05 */
229 if (fus3.cn30xx.crip_64k)
230 core_model = "05";
231 /*
232 * This series of chips didn't follow the standard
233 * pass numbering.
234 */
235 switch (chip_id & 0xf) {
236 case 0:
237 strcpy(pass, "1.0");
238 break;
239 case 2:
240 strcpy(pass, "1.1");
241 break;
242 default:
243 strcpy(pass, "X.X");
244 break;
245 }
246 break;
247 case 3: /* CN58XX */
248 family = "58";
249 /* Special case. 4 core, no crypto */
250 if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto)
251 core_model = "29";
252
253 /* Pass 1 uses different encodings for pass numbers */
254 if ((chip_id & 0xFF) < 0x8) {
255 switch (chip_id & 0x3) {
256 case 0:
257 strcpy(pass, "1.0");
258 break;
259 case 1:
260 strcpy(pass, "1.1");
261 break;
262 case 3:
263 strcpy(pass, "1.2");
264 break;
265 default:
266 strcpy(pass, "1.X");
267 break;
268 }
269 }
270 break;
271 case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
272 if (fus_dat2.cn56xx.raid_en) {
273 if (fus3.cn56xx.crip_1024k)
274 family = "55";
275 else
276 family = "57";
277 if (fus_dat2.cn56xx.nocrypto)
278 suffix = "SP";
279 else
280 suffix = "SSP";
281 } else {
282 if (fus_dat2.cn56xx.nocrypto)
283 suffix = "CP";
284 else {
285 suffix = "NSP";
286 if (fus_dat3.s.nozip)
287 suffix = "SCP";
288 }
289 if (fus3.cn56xx.crip_1024k)
290 family = "54";
291 else
292 family = "56";
293 }
294 break;
295 case 6: /* CN50XX */
296 family = "50";
297 break;
298 case 7: /* CN52XX */
299 if (fus3.cn52xx.crip_256k)
300 family = "51";
301 else
302 family = "52";
303 break;
304 default:
305 family = "XX";
306 core_model = "XX";
307 strcpy(pass, "X.X");
308 suffix = "XXX";
309 break;
310 }
311
312 clock_mhz = octeon_get_clock_rate() / 1000000;
313
314 if (family[0] != '3') {
315 /* Check for model in fuses, overrides normal decode */
316 /* This is _not_ valid for Octeon CN3XXX models */
317 fuse_data |= cvmx_fuse_read_byte(51);
318 fuse_data = fuse_data << 8;
319 fuse_data |= cvmx_fuse_read_byte(50);
320 fuse_data = fuse_data << 8;
321 fuse_data |= cvmx_fuse_read_byte(49);
322 fuse_data = fuse_data << 8;
323 fuse_data |= cvmx_fuse_read_byte(48);
324 if (fuse_data & 0x7ffff) {
325 int model = fuse_data & 0x3fff;
326 int suffix = (fuse_data >> 14) & 0x1f;
327 if (suffix && model) {
328 /*
329				 * Have both the model number and the
330				 * suffix in fuses, so use both.
331 */
332 sprintf(fuse_model, "%d%c",
333 model, 'A' + suffix - 1);
334 core_model = "";
335 family = fuse_model;
336 } else if (suffix && !model) {
337 /*
338 * Only have suffix, so add suffix to
339 * 'normal' model number.
340 */
341 sprintf(fuse_model, "%s%c", core_model,
342 'A' + suffix - 1);
343 core_model = fuse_model;
344 } else {
345 /*
346 * Don't have suffix, so just use
347 * model from fuses.
348 */
349 sprintf(fuse_model, "%d", model);
350 core_model = "";
351 family = fuse_model;
352 }
353 }
354 }
355 sprintf(buffer, "CN%s%sp%s-%d-%s",
356 family, core_model, pass, clock_mhz, suffix);
357 return buffer;
358}
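A worked example may make the decode above concrete. The following standalone sketch applies the same bit fields to a hypothetical chip ID (the value 0x0608 is invented for illustration, not read from real hardware): the family comes from bits <15:8>, and the default pass encoding uses bits <5:3> (major minus one) and <2:0> (minor).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t chip_id = 0x0608;	/* hypothetical ID: family 6 (CN50XX), pass bits 0x08 */
	char pass[4];

	/* Same default decode used by octeon_model_get_string_buffer() */
	sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
	printf("family %u, pass %s\n", (chip_id >> 8) & 0xff, pass);
	/* prints "family 6, pass 2.0", i.e. a CN50XX pass 2.0 part */
	return 0;
}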
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 000000000000..553d36cbcc42
--- /dev/null
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,84 @@
1/*
2 * Octeon Bootbus flash setup
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2007, 2008 Cavium Networks
9 */
10#include <linux/kernel.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h>
13#include <linux/mtd/partitions.h>
14
15#include <asm/octeon/octeon.h>
16
17static struct map_info flash_map;
18static struct mtd_info *mymtd;
19#ifdef CONFIG_MTD_PARTITIONS
20static int nr_parts;
21static struct mtd_partition *parts;
22static const char *part_probe_types[] = {
23 "cmdlinepart",
24#ifdef CONFIG_MTD_REDBOOT_PARTS
25 "RedBoot",
26#endif
27 NULL
28};
29#endif
30
31/**
32 * Module/driver initialization.
33 *
34 * Returns Zero on success
35 */
36static int __init flash_init(void)
37{
38 /*
39 * Read the bootbus region 0 setup to determine the base
40 * address of the flash.
41 */
42 union cvmx_mio_boot_reg_cfgx region_cfg;
43 region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
44 if (region_cfg.s.en) {
45 /*
46 * The bootloader always takes the flash and sets its
47 * address so the entire flash fits below
48 * 0x1fc00000. This way the flash aliases to
49 * 0x1fc00000 for booting. Software can access the
50 * full flash at the true address, while core boot can
51 * access 4MB.
52 */
53 /* Use this name so old part lines work */
54 flash_map.name = "phys_mapped_flash";
55 flash_map.phys = region_cfg.s.base << 16;
56 flash_map.size = 0x1fc00000 - flash_map.phys;
57 flash_map.bankwidth = 1;
58 flash_map.virt = ioremap(flash_map.phys, flash_map.size);
59 pr_notice("Bootbus flash: Setting flash for %luMB flash at "
60 "0x%08lx\n", flash_map.size >> 20, flash_map.phys);
61 simple_map_init(&flash_map);
62 mymtd = do_map_probe("cfi_probe", &flash_map);
63 if (mymtd) {
64 mymtd->owner = THIS_MODULE;
65
66#ifdef CONFIG_MTD_PARTITIONS
67 nr_parts = parse_mtd_partitions(mymtd,
68 part_probe_types,
69 &parts, 0);
70 if (nr_parts > 0)
71 add_mtd_partitions(mymtd, parts, nr_parts);
72 else
73 add_mtd_device(mymtd);
74#else
75 add_mtd_device(mymtd);
76#endif
77 } else {
78 pr_err("Failed to register MTD device for flash\n");
79 }
80 }
81 return 0;
82}
83
84late_initcall(flash_init);
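To make the window arithmetic in flash_init() concrete, here is a small sketch with a hypothetical MIO_BOOT_REG_CFG0 base field (the value is invented). The base field holds physical address bits <31:16>, so the byte address is base << 16, and the usable window runs up to the 0x1fc00000 boot alias:

#include <stdio.h>

int main(void)
{
	unsigned long base_field = 0x1f40;		/* hypothetical CSR base field */
	unsigned long phys = base_field << 16;		/* 0x1f400000 */
	unsigned long size = 0x1fc00000 - phys;		/* 0x00800000, i.e. 8 MB */

	printf("flash at 0x%08lx, %lu MB\n", phys, size >> 20);
	return 0;
}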
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 000000000000..fc72984a5dae
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,497 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2008 Cavium Networks
7 */
8#include <linux/irq.h>
9#include <linux/interrupt.h>
10#include <linux/hardirq.h>
11
12#include <asm/octeon/octeon.h>
13
14DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
15DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
16DEFINE_SPINLOCK(octeon_irq_msi_lock);
17
18static void octeon_irq_core_ack(unsigned int irq)
19{
20 unsigned int bit = irq - OCTEON_IRQ_SW0;
21 /*
22 * We don't need to disable IRQs to make these atomic since
23 * they are already disabled earlier in the low level
24 * interrupt code.
25 */
26 clear_c0_status(0x100 << bit);
27 /* The two user interrupts must be cleared manually. */
28 if (bit < 2)
29 clear_c0_cause(0x100 << bit);
30}
31
32static void octeon_irq_core_eoi(unsigned int irq)
33{
34 irq_desc_t *desc = irq_desc + irq;
35 unsigned int bit = irq - OCTEON_IRQ_SW0;
36 /*
37 * If an IRQ is being processed while we are disabling it the
38 * handler will attempt to unmask the interrupt after it has
39 * been disabled.
40 */
41 if (desc->status & IRQ_DISABLED)
42 return;
43
44 /* There is a race here. We should fix it. */
45
46 /*
47 * We don't need to disable IRQs to make these atomic since
48 * they are already disabled earlier in the low level
49 * interrupt code.
50 */
51 set_c0_status(0x100 << bit);
52}
53
54static void octeon_irq_core_enable(unsigned int irq)
55{
56 unsigned long flags;
57 unsigned int bit = irq - OCTEON_IRQ_SW0;
58
59 /*
60 * We need to disable interrupts to make sure our updates are
61 * atomic.
62 */
63 local_irq_save(flags);
64 set_c0_status(0x100 << bit);
65 local_irq_restore(flags);
66}
67
68static void octeon_irq_core_disable_local(unsigned int irq)
69{
70 unsigned long flags;
71 unsigned int bit = irq - OCTEON_IRQ_SW0;
72 /*
73 * We need to disable interrupts to make sure our updates are
74 * atomic.
75 */
76 local_irq_save(flags);
77 clear_c0_status(0x100 << bit);
78 local_irq_restore(flags);
79}
80
81static void octeon_irq_core_disable(unsigned int irq)
82{
83#ifdef CONFIG_SMP
84 on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
85 (void *) (long) irq, 1);
86#else
87 octeon_irq_core_disable_local(irq);
88#endif
89}
90
91static struct irq_chip octeon_irq_chip_core = {
92 .name = "Core",
93 .enable = octeon_irq_core_enable,
94 .disable = octeon_irq_core_disable,
95 .ack = octeon_irq_core_ack,
96 .eoi = octeon_irq_core_eoi,
97};
98
99
100static void octeon_irq_ciu0_ack(unsigned int irq)
101{
102 /*
103 * In order to avoid any locking accessing the CIU, we
104 * acknowledge CIU interrupts by disabling all of them. This
105 * way we can use a per core register and avoid any out of
106	 * core locking requirements. This has the side effect that
107 * CIU interrupts can't be processed recursively.
108 *
109 * We don't need to disable IRQs to make these atomic since
110 * they are already disabled earlier in the low level
111 * interrupt code.
112 */
113 clear_c0_status(0x100 << 2);
114}
115
116static void octeon_irq_ciu0_eoi(unsigned int irq)
117{
118 /*
119 * Enable all CIU interrupts again. We don't need to disable
120 * IRQs to make these atomic since they are already disabled
121 * earlier in the low level interrupt code.
122 */
123 set_c0_status(0x100 << 2);
124}
125
126static void octeon_irq_ciu0_enable(unsigned int irq)
127{
128 int coreid = cvmx_get_core_num();
129 unsigned long flags;
130 uint64_t en0;
131 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
132
133 /*
134 * A read lock is used here to make sure only one core is ever
135 * updating the CIU enable bits at a time. During an enable
136 * the cores don't interfere with each other. During a disable
137 * the write lock stops any enables that might cause a
138 * problem.
139 */
140 read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
141 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
142 en0 |= 1ull << bit;
143 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
144 cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
145 read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
146}
147
148static void octeon_irq_ciu0_disable(unsigned int irq)
149{
150 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
151 unsigned long flags;
152 uint64_t en0;
153#ifdef CONFIG_SMP
154 int cpu;
155 write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
156 for_each_online_cpu(cpu) {
157 int coreid = cpu_logical_map(cpu);
158 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
159 en0 &= ~(1ull << bit);
160 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
161 }
162 /*
163 * We need to do a read after the last update to make sure all
164 * of them are done.
165 */
166 cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
167 write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
168#else
169 int coreid = cvmx_get_core_num();
170 local_irq_save(flags);
171 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
172 en0 &= ~(1ull << bit);
173 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
174 cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
175 local_irq_restore(flags);
176#endif
177}
178
179#ifdef CONFIG_SMP
180static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
181{
182 int cpu;
183 int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
184
185 write_lock(&octeon_irq_ciu0_rwlock);
186 for_each_online_cpu(cpu) {
187 int coreid = cpu_logical_map(cpu);
188 uint64_t en0 =
189 cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
190 if (cpumask_test_cpu(cpu, dest))
191 en0 |= 1ull << bit;
192 else
193 en0 &= ~(1ull << bit);
194 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
195 }
196 /*
197 * We need to do a read after the last update to make sure all
198 * of them are done.
199 */
200 cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
201 write_unlock(&octeon_irq_ciu0_rwlock);
202}
203#endif
204
205static struct irq_chip octeon_irq_chip_ciu0 = {
206 .name = "CIU0",
207 .enable = octeon_irq_ciu0_enable,
208 .disable = octeon_irq_ciu0_disable,
209 .ack = octeon_irq_ciu0_ack,
210 .eoi = octeon_irq_ciu0_eoi,
211#ifdef CONFIG_SMP
212 .set_affinity = octeon_irq_ciu0_set_affinity,
213#endif
214};
215
216
217static void octeon_irq_ciu1_ack(unsigned int irq)
218{
219 /*
220 * In order to avoid any locking accessing the CIU, we
221 * acknowledge CIU interrupts by disabling all of them. This
222 * way we can use a per core register and avoid any out of
223	 * core locking requirements. This has the side effect that
224 * CIU interrupts can't be processed recursively. We don't
225 * need to disable IRQs to make these atomic since they are
226 * already disabled earlier in the low level interrupt code.
227 */
228 clear_c0_status(0x100 << 3);
229}
230
231static void octeon_irq_ciu1_eoi(unsigned int irq)
232{
233 /*
234 * Enable all CIU interrupts again. We don't need to disable
235 * IRQs to make these atomic since they are already disabled
236 * earlier in the low level interrupt code.
237 */
238 set_c0_status(0x100 << 3);
239}
240
241static void octeon_irq_ciu1_enable(unsigned int irq)
242{
243 int coreid = cvmx_get_core_num();
244 unsigned long flags;
245 uint64_t en1;
246 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
247
248 /*
249 * A read lock is used here to make sure only one core is ever
250 * updating the CIU enable bits at a time. During an enable
251 * the cores don't interfere with each other. During a disable
252 * the write lock stops any enables that might cause a
253 * problem.
254 */
255 read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
256 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
257 en1 |= 1ull << bit;
258 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
259 cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
260 read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
261}
262
263static void octeon_irq_ciu1_disable(unsigned int irq)
264{
265 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
266 unsigned long flags;
267 uint64_t en1;
268#ifdef CONFIG_SMP
269 int cpu;
270 write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
271 for_each_online_cpu(cpu) {
272 int coreid = cpu_logical_map(cpu);
273 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
274 en1 &= ~(1ull << bit);
275 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
276 }
277 /*
278 * We need to do a read after the last update to make sure all
279 * of them are done.
280 */
281 cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
282 write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
283#else
284 int coreid = cvmx_get_core_num();
285 local_irq_save(flags);
286 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
287 en1 &= ~(1ull << bit);
288 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
289 cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
290 local_irq_restore(flags);
291#endif
292}
293
294#ifdef CONFIG_SMP
295static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
296{
297 int cpu;
298 int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
299
300 write_lock(&octeon_irq_ciu1_rwlock);
301 for_each_online_cpu(cpu) {
302 int coreid = cpu_logical_map(cpu);
303 uint64_t en1 =
304 cvmx_read_csr(CVMX_CIU_INTX_EN1
305 (coreid * 2 + 1));
306 if (cpumask_test_cpu(cpu, dest))
307 en1 |= 1ull << bit;
308 else
309 en1 &= ~(1ull << bit);
310 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
311 }
312 /*
313 * We need to do a read after the last update to make sure all
314 * of them are done.
315 */
316 cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
317 write_unlock(&octeon_irq_ciu1_rwlock);
318}
319#endif
320
321static struct irq_chip octeon_irq_chip_ciu1 = {
322 .name = "CIU1",
323 .enable = octeon_irq_ciu1_enable,
324 .disable = octeon_irq_ciu1_disable,
325 .ack = octeon_irq_ciu1_ack,
326 .eoi = octeon_irq_ciu1_eoi,
327#ifdef CONFIG_SMP
328 .set_affinity = octeon_irq_ciu1_set_affinity,
329#endif
330};
331
332#ifdef CONFIG_PCI_MSI
333
334static void octeon_irq_msi_ack(unsigned int irq)
335{
336 if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
337 /* These chips have PCI */
338 cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
339 1ull << (irq - OCTEON_IRQ_MSI_BIT0));
340 } else {
341 /*
342 * These chips have PCIe. Thankfully the ACK doesn't
343 * need any locking.
344 */
345 cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
346 1ull << (irq - OCTEON_IRQ_MSI_BIT0));
347 }
348}
349
350static void octeon_irq_msi_eoi(unsigned int irq)
351{
352 /* Nothing needed */
353}
354
355static void octeon_irq_msi_enable(unsigned int irq)
356{
357 if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
358 /*
359 * Octeon PCI doesn't have the ability to mask/unmask
360 * MSI interrupts individually. Instead of
361		 * masking/unmasking them in groups of 16, we simply
362		 * assume MSI devices are well behaved. MSI
363		 * interrupts are always enabled and the ACK is assumed
364 * to be enough.
365 */
366 } else {
367 /* These chips have PCIe. Note that we only support
368 * the first 64 MSI interrupts. Unfortunately all the
369 * MSI enables are in the same register. We use
370 * MSI0's lock to control access to them all.
371 */
372 uint64_t en;
373 unsigned long flags;
374 spin_lock_irqsave(&octeon_irq_msi_lock, flags);
375 en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
376 en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
377 cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
378 cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
379 spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
380 }
381}
382
383static void octeon_irq_msi_disable(unsigned int irq)
384{
385 if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
386 /* See comment in enable */
387 } else {
388 /*
389 * These chips have PCIe. Note that we only support
390 * the first 64 MSI interrupts. Unfortunately all the
391 * MSI enables are in the same register. We use
392 * MSI0's lock to control access to them all.
393 */
394 uint64_t en;
395 unsigned long flags;
396 spin_lock_irqsave(&octeon_irq_msi_lock, flags);
397 en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
398 en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
399 cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
400 cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
401 spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
402 }
403}
404
405static struct irq_chip octeon_irq_chip_msi = {
406 .name = "MSI",
407 .enable = octeon_irq_msi_enable,
408 .disable = octeon_irq_msi_disable,
409 .ack = octeon_irq_msi_ack,
410 .eoi = octeon_irq_msi_eoi,
411};
412#endif
413
414void __init arch_init_irq(void)
415{
416 int irq;
417
418#ifdef CONFIG_SMP
419 /* Set the default affinity to the boot cpu. */
420 cpumask_clear(irq_default_affinity);
421 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
422#endif
423
424 if (NR_IRQS < OCTEON_IRQ_LAST)
425 pr_err("octeon_irq_init: NR_IRQS is set too low\n");
426
427 /* 0 - 15 reserved for i8259 master and slave controller. */
428
429 /* 17 - 23 Mips internal */
430 for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
431 set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
432 handle_percpu_irq);
433 }
434
435 /* 24 - 87 CIU_INT_SUM0 */
436 for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
437 set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
438 handle_percpu_irq);
439 }
440
441 /* 88 - 151 CIU_INT_SUM1 */
442 for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
443 set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
444 handle_percpu_irq);
445 }
446
447#ifdef CONFIG_PCI_MSI
448 /* 152 - 215 PCI/PCIe MSI interrupts */
449 for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
450 set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
451 handle_percpu_irq);
452 }
453#endif
454 set_c0_status(0x300 << 2);
455}
456
457asmlinkage void plat_irq_dispatch(void)
458{
459 const unsigned long core_id = cvmx_get_core_num();
460 const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
461 const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
462 const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
463 const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
464 unsigned long cop0_cause;
465 unsigned long cop0_status;
466 uint64_t ciu_en;
467 uint64_t ciu_sum;
468
469 while (1) {
470 cop0_cause = read_c0_cause();
471 cop0_status = read_c0_status();
472 cop0_cause &= cop0_status;
473 cop0_cause &= ST0_IM;
474
475 if (unlikely(cop0_cause & STATUSF_IP2)) {
476 ciu_sum = cvmx_read_csr(ciu_sum0_address);
477 ciu_en = cvmx_read_csr(ciu_en0_address);
478 ciu_sum &= ciu_en;
479 if (likely(ciu_sum))
480 do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
481 else
482 spurious_interrupt();
483 } else if (unlikely(cop0_cause & STATUSF_IP3)) {
484 ciu_sum = cvmx_read_csr(ciu_sum1_address);
485 ciu_en = cvmx_read_csr(ciu_en1_address);
486 ciu_sum &= ciu_en;
487 if (likely(ciu_sum))
488 do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
489 else
490 spurious_interrupt();
491 } else if (likely(cop0_cause)) {
492 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
493 } else {
494 break;
495 }
496 }
497}
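The mapping from a pending CIU summary bit to a Linux IRQ number in plat_irq_dispatch() can be checked with a small sketch. fls64() returns the 1-based position of the highest set bit, so summary bit N maps to base + N; the base value 24 below is assumed from the "24 - 87 CIU_INT_SUM0" comment in arch_init_irq(), and fls64_sketch() is a portable stand-in, not the kernel's implementation:

#include <stdio.h>
#include <stdint.h>

static int fls64_sketch(uint64_t x)
{
	int n = 0;
	while (x) {		/* 0 for no bits set, else 1-based MSB index */
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	const int OCTEON_IRQ_WORKQ0 = 24;	/* assumed, per the arch_init_irq() comment */
	uint64_t ciu_sum = 1ull << 3;		/* pretend summary bit 3 is pending */

	/* Same expression as the IP2 branch of plat_irq_dispatch() */
	printf("irq = %d\n", fls64_sketch(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);	/* 27 */
	return 0;
}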
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 000000000000..88e0cddca205
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,521 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Unified implementation of memcpy, memmove and the __copy_user backend.
7 *
8 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
9 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
10 * Copyright (C) 2002 Broadcom, Inc.
11 * memcpy/copy_user author: Mark Vandevoorde
12 *
13 * Mnemonic names for arguments to memcpy/__copy_user
14 */
15
16#include <asm/asm.h>
17#include <asm/asm-offsets.h>
18#include <asm/regdef.h>
19
20#define dst a0
21#define src a1
22#define len a2
23
24/*
25 * Spec
26 *
27 * memcpy copies len bytes from src to dst and sets v0 to dst.
28 * It assumes that
29 * - src and dst don't overlap
30 * - src is readable
31 * - dst is writable
32 * memcpy uses the standard calling convention
33 *
34 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
35 * the number of uncopied bytes due to an exception caused by a read or write.
36 * __copy_user assumes that src and dst don't overlap, and that the call is
37 * implementing one of the following:
38 * copy_to_user
39 * - src is readable (no exceptions when reading src)
40 * copy_from_user
41 * - dst is writable (no exceptions when writing dst)
42 * __copy_user uses a non-standard calling convention; see
43 * arch/mips/include/asm/uaccess.h
44 *
45 * When an exception happens on a load, the handler must
46 * ensure that all of the destination buffer is overwritten to prevent
47 * leaking information to user mode programs.
48 */
49
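As a rough illustration of how this contract surfaces at the C level, here is a minimal sketch using the usual copy_from_user() convention (it returns the number of bytes left uncopied); read_from_user_sketch() and its arguments are hypothetical names, not part of this patch:

#include <linux/uaccess.h>	/* copy_from_user() */
#include <linux/errno.h>

static int read_from_user_sketch(void *buf, const void __user *ubuf,
				 unsigned long len)
{
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;	/* a fault occurred; per the spec above, the
				 * unwritten tail of buf was cleared so no
				 * stale kernel data leaks */
	return 0;
}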
50/*
51 * Implementation
52 */
53
54/*
55 * The exception handler for loads requires that:
56 * 1- AT contains the address of the byte just past the end of the source
57 * of the copy,
58 * 2- src_entry <= src < AT, and
59 * 3- (dst - src) == (dst_entry - src_entry),
60 * The _entry suffix denotes values when __copy_user was called.
61 *
62 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
63 * (2) is met by incrementing src by the number of bytes copied
64 * (3) is met by not doing loads between a pair of increments of dst and src
65 *
66 * The exception handlers for stores adjust len (if necessary) and return.
67 * These handlers do not need to overwrite any data.
68 *
69 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
70 * they're not protected.
71 */
72
73#define EXC(inst_reg,addr,handler) \
749: inst_reg, addr; \
75 .section __ex_table,"a"; \
76 PTR 9b, handler; \
77 .previous
78
79/*
80 * Only on the 64-bit kernel can we make use of 64-bit registers.
81 */
82#ifdef CONFIG_64BIT
83#define USE_DOUBLE
84#endif
85
86#ifdef USE_DOUBLE
87
88#define LOAD ld
89#define LOADL ldl
90#define LOADR ldr
91#define STOREL sdl
92#define STORER sdr
93#define STORE sd
94#define ADD daddu
95#define SUB dsubu
96#define SRL dsrl
97#define SRA dsra
98#define SLL dsll
99#define SLLV dsllv
100#define SRLV dsrlv
101#define NBYTES 8
102#define LOG_NBYTES 3
103
104/*
105 * As we are sharing the code base with the mips32 tree (which uses the o32
106 * ABI register definitions), we need to redefine the register definitions
107 * from the n64 ABI register naming to the o32 ABI register naming.
108 */
109#undef t0
110#undef t1
111#undef t2
112#undef t3
113#define t0 $8
114#define t1 $9
115#define t2 $10
116#define t3 $11
117#define t4 $12
118#define t5 $13
119#define t6 $14
120#define t7 $15
121
122#else
123
124#define LOAD lw
125#define LOADL lwl
126#define LOADR lwr
127#define STOREL swl
128#define STORER swr
129#define STORE sw
130#define ADD addu
131#define SUB subu
132#define SRL srl
133#define SLL sll
134#define SRA sra
135#define SLLV sllv
136#define SRLV srlv
137#define NBYTES 4
138#define LOG_NBYTES 2
139
140#endif /* USE_DOUBLE */
141
142#ifdef CONFIG_CPU_LITTLE_ENDIAN
143#define LDFIRST LOADR
144#define LDREST LOADL
145#define STFIRST STORER
146#define STREST STOREL
147#define SHIFT_DISCARD SLLV
148#else
149#define LDFIRST LOADL
150#define LDREST LOADR
151#define STFIRST STOREL
152#define STREST STORER
153#define SHIFT_DISCARD SRLV
154#endif
155
156#define FIRST(unit) ((unit)*NBYTES)
157#define REST(unit) (FIRST(unit)+NBYTES-1)
158#define UNIT(unit) FIRST(unit)
159
160#define ADDRMASK (NBYTES-1)
161
162 .text
163 .set noreorder
164 .set noat
165
166/*
167 * A combined memcpy/__copy_user
168 * __copy_user sets len to 0 for success; else to an upper bound of
169 * the number of uncopied bytes.
170 * memcpy sets v0 to dst.
171 */
172 .align 5
173LEAF(memcpy) /* a0=dst a1=src a2=len */
174 move v0, dst /* return value */
175__memcpy:
176FEXPORT(__copy_user)
177 /*
178 * Note: dst & src may be unaligned, len may be 0
179 * Temps
180 */
181 #
182 # Octeon doesn't care if the destination is unaligned. The hardware
183	# can fix it faster than we can special-case the assembly.
184 #
185 pref 0, 0(src)
186 sltu t0, len, NBYTES # Check if < 1 word
187 bnez t0, copy_bytes_checklen
188 and t0, src, ADDRMASK # Check if src unaligned
189 bnez t0, src_unaligned
190 sltu t0, len, 4*NBYTES # Check if < 4 words
191 bnez t0, less_than_4units
192 sltu t0, len, 8*NBYTES # Check if < 8 words
193 bnez t0, less_than_8units
194 sltu t0, len, 16*NBYTES # Check if < 16 words
195 bnez t0, cleanup_both_aligned
196 sltu t0, len, 128+1 # Check if len < 129
197 bnez t0, 1f # Skip prefetch if len is too short
198 sltu t0, len, 256+1 # Check if len < 257
199 bnez t0, 1f # Skip prefetch if len is too short
200 pref 0, 128(src) # We must not prefetch invalid addresses
201 #
202 # This is where we loop if there is more than 128 bytes left
2032: pref 0, 256(src) # We must not prefetch invalid addresses
204 #
205 # This is where we loop if we can't prefetch anymore
2061:
207EXC( LOAD t0, UNIT(0)(src), l_exc)
208EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
209EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
210EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
211 SUB len, len, 16*NBYTES
212EXC( STORE t0, UNIT(0)(dst), s_exc_p16u)
213EXC( STORE t1, UNIT(1)(dst), s_exc_p15u)
214EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
215EXC( STORE t3, UNIT(3)(dst), s_exc_p13u)
216EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
217EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
218EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
219EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
220EXC( STORE t0, UNIT(4)(dst), s_exc_p12u)
221EXC( STORE t1, UNIT(5)(dst), s_exc_p11u)
222EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
223 ADD src, src, 16*NBYTES
224EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
225 ADD dst, dst, 16*NBYTES
226EXC( LOAD t0, UNIT(-8)(src), l_exc_copy)
227EXC( LOAD t1, UNIT(-7)(src), l_exc_copy)
228EXC( LOAD t2, UNIT(-6)(src), l_exc_copy)
229EXC( LOAD t3, UNIT(-5)(src), l_exc_copy)
230EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
231EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
232EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
233EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
234EXC( LOAD t0, UNIT(-4)(src), l_exc_copy)
235EXC( LOAD t1, UNIT(-3)(src), l_exc_copy)
236EXC( LOAD t2, UNIT(-2)(src), l_exc_copy)
237EXC( LOAD t3, UNIT(-1)(src), l_exc_copy)
238EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
239EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
240EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
241EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
242 sltu t0, len, 256+1 # See if we can prefetch more
243 beqz t0, 2b
244	sltu	t0, len, 128		# See if we can loop more times
245 beqz t0, 1b
246 nop
247 #
248 # Jump here if there are less than 16*NBYTES left.
249 #
250cleanup_both_aligned:
251 beqz len, done
252 sltu t0, len, 8*NBYTES
253 bnez t0, less_than_8units
254 nop
255EXC( LOAD t0, UNIT(0)(src), l_exc)
256EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
257EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
258EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
259 SUB len, len, 8*NBYTES
260EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
261EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
262EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
263EXC( STORE t3, UNIT(3)(dst), s_exc_p5u)
264EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
265EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
266EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
267EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
268EXC( STORE t0, UNIT(4)(dst), s_exc_p4u)
269EXC( STORE t1, UNIT(5)(dst), s_exc_p3u)
270EXC( STORE t2, UNIT(6)(dst), s_exc_p2u)
271EXC( STORE t3, UNIT(7)(dst), s_exc_p1u)
272 ADD src, src, 8*NBYTES
273 beqz len, done
274 ADD dst, dst, 8*NBYTES
275 #
276 # Jump here if there are less than 8*NBYTES left.
277 #
278less_than_8units:
279 sltu t0, len, 4*NBYTES
280 bnez t0, less_than_4units
281 nop
282EXC( LOAD t0, UNIT(0)(src), l_exc)
283EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
284EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
285EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
286 SUB len, len, 4*NBYTES
287EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
288EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
289EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
290EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
291 ADD src, src, 4*NBYTES
292 beqz len, done
293 ADD dst, dst, 4*NBYTES
294 #
295 # Jump here if there are less than 4*NBYTES left. This means
296 # we may need to copy up to 3 NBYTES words.
297 #
298less_than_4units:
299 sltu t0, len, 1*NBYTES
300 bnez t0, copy_bytes_checklen
301 nop
302 #
303 # 1) Copy NBYTES, then check length again
304 #
305EXC( LOAD t0, 0(src), l_exc)
306 SUB len, len, NBYTES
307 sltu t1, len, 8
308EXC( STORE t0, 0(dst), s_exc_p1u)
309 ADD src, src, NBYTES
310 bnez t1, copy_bytes_checklen
311 ADD dst, dst, NBYTES
312 #
313 # 2) Copy NBYTES, then check length again
314 #
315EXC( LOAD t0, 0(src), l_exc)
316 SUB len, len, NBYTES
317 sltu t1, len, 8
318EXC( STORE t0, 0(dst), s_exc_p1u)
319 ADD src, src, NBYTES
320 bnez t1, copy_bytes_checklen
321 ADD dst, dst, NBYTES
322 #
323 # 3) Copy NBYTES, then check length again
324 #
325EXC( LOAD t0, 0(src), l_exc)
326 SUB len, len, NBYTES
327 ADD src, src, NBYTES
328 ADD dst, dst, NBYTES
329 b copy_bytes_checklen
330EXC( STORE t0, -8(dst), s_exc_p1u)
331
332src_unaligned:
333#define rem t8
334 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
335 beqz t0, cleanup_src_unaligned
336 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
3371:
338/*
339 * Avoid consecutive LD*'s to the same register since some mips
340 * implementations can't issue them in the same cycle.
341 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
342 * are to the same unit (unless src is aligned, but it's not).
343 */
344EXC( LDFIRST t0, FIRST(0)(src), l_exc)
345EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
346 SUB len, len, 4*NBYTES
347EXC( LDREST t0, REST(0)(src), l_exc_copy)
348EXC( LDREST t1, REST(1)(src), l_exc_copy)
349EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
350EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
351EXC( LDREST t2, REST(2)(src), l_exc_copy)
352EXC( LDREST t3, REST(3)(src), l_exc_copy)
353 ADD src, src, 4*NBYTES
354EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
355EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
356EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
357EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
358 bne len, rem, 1b
359 ADD dst, dst, 4*NBYTES
360
361cleanup_src_unaligned:
362 beqz len, done
363 and rem, len, NBYTES-1 # rem = len % NBYTES
364 beq rem, len, copy_bytes
365 nop
3661:
367EXC( LDFIRST t0, FIRST(0)(src), l_exc)
368EXC( LDREST t0, REST(0)(src), l_exc_copy)
369 SUB len, len, NBYTES
370EXC( STORE t0, 0(dst), s_exc_p1u)
371 ADD src, src, NBYTES
372 bne len, rem, 1b
373 ADD dst, dst, NBYTES
374
375copy_bytes_checklen:
376 beqz len, done
377 nop
378copy_bytes:
379 /* 0 < len < NBYTES */
380#define COPY_BYTE(N) \
381EXC( lb t0, N(src), l_exc); \
382 SUB len, len, 1; \
383 beqz len, done; \
384EXC( sb t0, N(dst), s_exc_p1)
385
386 COPY_BYTE(0)
387 COPY_BYTE(1)
388#ifdef USE_DOUBLE
389 COPY_BYTE(2)
390 COPY_BYTE(3)
391 COPY_BYTE(4)
392 COPY_BYTE(5)
393#endif
394EXC( lb t0, NBYTES-2(src), l_exc)
395 SUB len, len, 1
396 jr ra
397EXC( sb t0, NBYTES-2(dst), s_exc_p1)
398done:
399 jr ra
400 nop
401 END(memcpy)
402
403l_exc_copy:
404 /*
405 * Copy bytes from src until faulting load address (or until a
406 * lb faults)
407 *
408 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
409 * may be more than a byte beyond the last address.
410 * Hence, the lb below may get an exception.
411 *
412 * Assumes src < THREAD_BUADDR($28)
413 */
414 LOAD t0, TI_TASK($28)
415 nop
416 LOAD t0, THREAD_BUADDR(t0)
4171:
418EXC( lb t1, 0(src), l_exc)
419 ADD src, src, 1
420 sb t1, 0(dst) # can't fault -- we're copy_from_user
421 bne src, t0, 1b
422 ADD dst, dst, 1
423l_exc:
424 LOAD t0, TI_TASK($28)
425 nop
426 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
427 nop
428	SUB	len, AT, t0		# len = number of uncopied bytes
429 /*
430 * Here's where we rely on src and dst being incremented in tandem,
431 * See (3) above.
432 * dst += (fault addr - src) to put dst at first byte to clear
433 */
434 ADD dst, t0 # compute start address in a1
435 SUB dst, src
436 /*
437 * Clear len bytes starting at dst. Can't call __bzero because it
438 * might modify len. An inefficient loop for these rare times...
439 */
440 beqz len, done
441 SUB src, len, 1
4421: sb zero, 0(dst)
443 ADD dst, dst, 1
444 bnez src, 1b
445 SUB src, src, 1
446 jr ra
447 nop
448
449
450#define SEXC(n) \
451s_exc_p ## n ## u: \
452 jr ra; \
453 ADD len, len, n*NBYTES
454
455SEXC(16)
456SEXC(15)
457SEXC(14)
458SEXC(13)
459SEXC(12)
460SEXC(11)
461SEXC(10)
462SEXC(9)
463SEXC(8)
464SEXC(7)
465SEXC(6)
466SEXC(5)
467SEXC(4)
468SEXC(3)
469SEXC(2)
470SEXC(1)
471
472s_exc_p1:
473 jr ra
474 ADD len, len, 1
475s_exc:
476 jr ra
477 nop
478
479 .align 5
480LEAF(memmove)
481 ADD t0, a0, a2
482 ADD t1, a1, a2
483 sltu t0, a1, t0 # dst + len <= src -> memcpy
484 sltu t1, a0, t1 # dst >= src + len -> memcpy
485 and t0, t1
486 beqz t0, __memcpy
487 move v0, a0 /* return value */
488 beqz a2, r_out
489 END(memmove)
490
491 /* fall through to __rmemcpy */
492LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
493 sltu t0, a1, a0
494 beqz t0, r_end_bytes_up # src >= dst
495 nop
496 ADD a0, a2 # dst = dst + len
497 ADD a1, a2 # src = src + len
498
499r_end_bytes:
500 lb t0, -1(a1)
501 SUB a2, a2, 0x1
502 sb t0, -1(a0)
503 SUB a1, a1, 0x1
504 bnez a2, r_end_bytes
505 SUB a0, a0, 0x1
506
507r_out:
508 jr ra
509 move a2, zero
510
511r_end_bytes_up:
512 lb t0, (a1)
513 SUB a2, a2, 0x1
514 sb t0, (a0)
515 ADD a1, a1, 0x1
516 bnez a2, r_end_bytes_up
517 ADD a0, a0, 0x1
518
519 jr ra
520 move a2, zero
521 END(__rmemcpy)
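The branch memmove takes above reduces to a simple region test; this C sketch restates it (the forward memcpy path is used unless the two buffers actually overlap, otherwise control falls through to the byte-wise __rmemcpy):

#include <stdio.h>

static int regions_overlap(const char *dst, const char *src, unsigned long len)
{
	return (src < dst + len) && (dst < src + len);
}

int main(void)
{
	char buf[16];

	printf("%d\n", regions_overlap(buf, buf + 8, 8));	/* 0: disjoint, memcpy path */
	printf("%d\n", regions_overlap(buf, buf + 4, 8));	/* 1: overlap, __rmemcpy path */
	return 0;
}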
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
new file mode 100644
index 000000000000..8240728d485a
--- /dev/null
+++ b/arch/mips/cavium-octeon/serial.c
@@ -0,0 +1,136 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2007 Cavium Networks
7 */
8#include <linux/console.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/serial.h>
13#include <linux/serial_8250.h>
14#include <linux/serial_reg.h>
15#include <linux/tty.h>
16
17#include <asm/time.h>
18
19#include <asm/octeon/octeon.h>
20
21#ifdef CONFIG_GDB_CONSOLE
22#define DEBUG_UART 0
23#else
24#define DEBUG_UART 1
25#endif
26
27unsigned int octeon_serial_in(struct uart_port *up, int offset)
28{
29 int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
30 if (offset == UART_IIR && (rv & 0xf) == 7) {
31 /* Busy interrupt, read the USR (39) and try again. */
32 cvmx_read_csr((uint64_t)(up->membase + (39 << 3)));
33 rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
34 }
35 return rv;
36}
37
38void octeon_serial_out(struct uart_port *up, int offset, int value)
39{
40 /*
41 * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits
42 * working.
43 */
44 if (offset == UART_LCR)
45 value &= 0x9f;
46 cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value);
47}
48
49/*
50 * Allocated in .bss, so it is all zeroed.
51 */
52#define OCTEON_MAX_UARTS 3
53static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1];
54static struct platform_device octeon_uart8250_device = {
55 .name = "serial8250",
56 .id = PLAT8250_DEV_PLATFORM,
57 .dev = {
58 .platform_data = octeon_uart8250_data,
59 },
60};
61
62static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
63{
64 p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
65 p->type = PORT_OCTEON;
66 p->iotype = UPIO_MEM;
67 p->regshift = 3; /* I/O addresses are every 8 bytes */
68 p->uartclk = mips_hpt_frequency;
69 p->serial_in = octeon_serial_in;
70 p->serial_out = octeon_serial_out;
71}
72
73static int __init octeon_serial_init(void)
74{
75 int enable_uart0;
76 int enable_uart1;
77 int enable_uart2;
78 struct plat_serial8250_port *p;
79
80#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
81 /*
82 * If we are configured to run as the second of two kernels,
83 * disable uart0 and enable uart1. Uart0 is owned by the first
84 * kernel
85 */
86 enable_uart0 = 0;
87 enable_uart1 = 1;
88#else
89 /*
90 * We are configured for the first kernel. We'll enable uart0
91	 * if the bootloader told us to use 0, otherwise we'll enable
92	 * uart 1.
93 */
94 enable_uart0 = (octeon_get_boot_uart() == 0);
95 enable_uart1 = (octeon_get_boot_uart() == 1);
96#ifdef CONFIG_KGDB
97 enable_uart1 = 1;
98#endif
99#endif
100
101 /* Right now CN52XX is the only chip with a third uart */
102 enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);
103
104 p = octeon_uart8250_data;
105 if (enable_uart0) {
106 /* Add a ttyS device for hardware uart 0 */
107 octeon_uart_set_common(p);
108 p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
109 p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
110 p->irq = OCTEON_IRQ_UART0;
111 p++;
112 }
113
114 if (enable_uart1) {
115 /* Add a ttyS device for hardware uart 1 */
116 octeon_uart_set_common(p);
117 p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
118 p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
119 p->irq = OCTEON_IRQ_UART1;
120 p++;
121 }
122 if (enable_uart2) {
123 /* Add a ttyS device for hardware uart 2 */
124 octeon_uart_set_common(p);
125 p->membase = (void *) CVMX_MIO_UART2_RBR;
126 p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
127 p->irq = OCTEON_IRQ_UART2;
128 p++;
129 }
130
131 BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);
132
133 return platform_device_register(&octeon_uart8250_device);
134}
135
136device_initcall(octeon_serial_init);
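A short sketch of the addressing implied by regshift = 3: the OCTEON UART spaces its 8250-style registers eight bytes apart, so register N is read or written at membase + (N << 3), exactly as octeon_serial_in()/octeon_serial_out() do above. The base address below is a made-up placeholder, not the real CVMX_MIO_UARTX_RBR value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t membase = 0x8001180000000800ull;	/* placeholder UART0 base */
	int offset = 3;					/* UART_LCR lives at 8250 offset 3 */

	printf("CSR address = 0x%016llx\n",
	       (unsigned long long)(membase + ((uint64_t)offset << 3)));
	return 0;
}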
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 000000000000..5f4e49ba4713
--- /dev/null
+++ b/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,927 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2007 Cavium Networks
7 * Copyright (C) 2008 Wind River Systems
8 */
9#include <linux/init.h>
10#include <linux/console.h>
11#include <linux/delay.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/serial.h>
16#include <linux/types.h>
17#include <linux/string.h> /* for memset */
18#include <linux/tty.h>
19#include <linux/time.h>
20#include <linux/platform_device.h>
21#include <linux/serial_core.h>
22#include <linux/serial_8250.h>
23
24#include <asm/processor.h>
25#include <asm/reboot.h>
26#include <asm/smp-ops.h>
27#include <asm/system.h>
28#include <asm/irq_cpu.h>
29#include <asm/mipsregs.h>
30#include <asm/bootinfo.h>
31#include <asm/sections.h>
32#include <asm/time.h>
33
34#include <asm/octeon/octeon.h>
35
36#ifdef CONFIG_CAVIUM_DECODE_RSL
37extern void cvmx_interrupt_rsl_decode(void);
38extern int __cvmx_interrupt_ecc_report_single_bit_errors;
39extern void cvmx_interrupt_rsl_enable(void);
40#endif
41
42extern struct plat_smp_ops octeon_smp_ops;
43
44#ifdef CONFIG_PCI
45extern void pci_console_init(const char *arg);
46#endif
47
48#ifdef CONFIG_CAVIUM_RESERVE32
49extern uint64_t octeon_reserve32_memory;
50#endif
51static unsigned long long MAX_MEMORY = 512ull << 20;
52
53struct octeon_boot_descriptor *octeon_boot_desc_ptr;
54
55struct cvmx_bootinfo *octeon_bootinfo;
56EXPORT_SYMBOL(octeon_bootinfo);
57
58#ifdef CONFIG_CAVIUM_RESERVE32
59uint64_t octeon_reserve32_memory;
60EXPORT_SYMBOL(octeon_reserve32_memory);
61#endif
62
63static int octeon_uart;
64
65extern asmlinkage void handle_int(void);
66extern asmlinkage void plat_irq_dispatch(void);
67
68/**
69 * Return non-zero if we are currently running in the Octeon simulator
70 *
71 * Returns Non-zero when running in the simulator
72 */
73int octeon_is_simulation(void)
74{
75 return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
76}
77EXPORT_SYMBOL(octeon_is_simulation);
78
79/**
80 * Return true if Octeon is in PCI Host mode. This means
81 * Linux can control the PCI bus.
82 *
83 * Returns Non-zero if Octeon is in host mode.
84 */
85int octeon_is_pci_host(void)
86{
87#ifdef CONFIG_PCI
88 return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
89#else
90 return 0;
91#endif
92}
93
94/**
95 * Get the clock rate of Octeon
96 *
97 * Returns Clock rate in Hz
98 */
99uint64_t octeon_get_clock_rate(void)
100{
101 if (octeon_is_simulation())
102 octeon_bootinfo->eclock_hz = 6000000;
103 return octeon_bootinfo->eclock_hz;
104}
105EXPORT_SYMBOL(octeon_get_clock_rate);
106
107/**
108 * Write to the LCD display connected to the bootbus. This display
109 * exists on most Cavium evaluation boards. If it doesn't exist, then
110 * this function doesn't do anything.
111 *
112 * @s: String to write
113 */
114void octeon_write_lcd(const char *s)
115{
116 if (octeon_bootinfo->led_display_base_addr) {
117 void __iomem *lcd_address =
118 ioremap_nocache(octeon_bootinfo->led_display_base_addr,
119 8);
120 int i;
121 for (i = 0; i < 8; i++, s++) {
122 if (*s)
123 iowrite8(*s, lcd_address + i);
124 else
125 iowrite8(' ', lcd_address + i);
126 }
127 iounmap(lcd_address);
128 }
129}
130
131/**
132 * Return the console uart passed by the bootloader
133 *
134 * Returns uart (0 or 1)
135 */
136int octeon_get_boot_uart(void)
137{
138 int uart;
139#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
140 uart = 1;
141#else
142 uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
143 1 : 0;
144#endif
145 return uart;
146}
147
148/**
149 * Get the coremask Linux was booted on.
150 *
151 * Returns Core mask
152 */
153int octeon_get_boot_coremask(void)
154{
155 return octeon_boot_desc_ptr->core_mask;
156}
157
158/**
159 * Check the hardware BIST results for a CPU
160 */
161void octeon_check_cpu_bist(void)
162{
163 const int coreid = cvmx_get_core_num();
164 unsigned long long mask;
165 unsigned long long bist_val;
166
167 /* Check BIST results for COP0 registers */
168 mask = 0x1f00000000ull;
169 bist_val = read_octeon_c0_icacheerr();
170 if (bist_val & mask)
171 pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
172 coreid, bist_val);
173
174 bist_val = read_octeon_c0_dcacheerr();
175 if (bist_val & 1)
176 pr_err("Core%d L1 Dcache parity error: "
177 "CacheErr(dcache) = 0x%llx\n",
178 coreid, bist_val);
179
180 mask = 0xfc00000000000000ull;
181 bist_val = read_c0_cvmmemctl();
182 if (bist_val & mask)
183 pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
184 coreid, bist_val);
185
186 write_octeon_c0_dcacheerr(0);
187}
188
189#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
190/**
191 * Called on every core to set up the wired TLB entry needed
192 * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
193 *
194 */
195static void octeon_hal_setup_per_cpu_reserved32(void *unused)
196{
197 /*
198 * The config has selected to wire the reserve32 memory for all
199 * userspace applications. We need to put a wired TLB entry in for each
200 * 512MB of reserve32 memory. We only handle double 256MB pages here,
201	 * so reserve32 must be a multiple of 512MB.
202 */
203 uint32_t size = CONFIG_CAVIUM_RESERVE32;
204 uint32_t entrylo0 =
205 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
206 uint32_t entrylo1 = entrylo0 + (256 << 14);
207 uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
208 while (size >= 512) {
209#if 0
210 pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
211 smp_processor_id(), entryhi);
212#endif
213 add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
214 entrylo0 += 512 << 14;
215 entrylo1 += 512 << 14;
216 entryhi += 512 << 20;
217 size -= 512;
218 }
219}
220#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
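The EntryLo increments above follow from how the TLB encodes addresses: EntryLo carries the physical address shifted right by 6 (the PFN field starts at bit 6), so an address delta D becomes an EntryLo delta of D >> 6. A quick arithmetic check, illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long mb = 1ul << 20;

	/* 256 MB between the two EntryLo halves of a double entry */
	printf("%d\n", (256 << 14) == (int)((256 * mb) >> 6));	/* 1 */
	/* 512 MB per wired-entry loop iteration */
	printf("%d\n", (512 << 14) == (int)((512 * mb) >> 6));	/* 1 */
	return 0;
}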
221
222/**
223 * Called to release the named block which was used to make sure
224 * that nobody used the memory for something else during
225 * init. Now we'll free it so userspace apps can use this
226 * memory region with bootmem_alloc.
227 *
228 * This function is called only once from prom_free_prom_memory().
229 */
230void octeon_hal_setup_reserved32(void)
231{
232#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
233 on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
234#endif
235}
236
237/**
238 * Reboot Octeon
239 *
240 * @command: Command to pass to the bootloader. Currently ignored.
241 */
242static void octeon_restart(char *command)
243{
244 /* Disable all watchdogs before soft reset. They don't get cleared */
245#ifdef CONFIG_SMP
246 int cpu;
247 for_each_online_cpu(cpu)
248 cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
249#else
250 cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
251#endif
252
253 mb();
254 while (1)
255 cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
256}
257
258
259/**
260 * Permanently stop a core.
261 *
262 * @arg: Ignored.
263 */
264static void octeon_kill_core(void *arg)
265{
266 mb();
267 if (octeon_is_simulation()) {
268 /* The simulator needs the watchdog to stop for dead cores */
269 cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
270		/* A break instruction causes the simulator to stop a core */
271 asm volatile ("sync\nbreak");
272 }
273}
274
275
276/**
277 * Halt the system
278 */
279static void octeon_halt(void)
280{
281 smp_call_function(octeon_kill_core, NULL, 0);
282
283 switch (octeon_bootinfo->board_type) {
284 case CVMX_BOARD_TYPE_NAO38:
285 /* Driving a 1 to GPIO 12 shuts off this board */
286 cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
287 cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
288 break;
289 default:
290 octeon_write_lcd("PowerOff");
291 break;
292 }
293
294 octeon_kill_core(NULL);
295}
296
297#if 0
298/**
299 * Platform time init specifics.
300 *
301 */
302void __init plat_time_init(void)
303{
304 /* Nothing special here, but we are required to have one */
305}
306
307#endif
308
309/**
310 * Handle all the error condition interrupts that might occur.
311 *
312 */
313#ifdef CONFIG_CAVIUM_DECODE_RSL
314static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
315{
316 cvmx_interrupt_rsl_decode();
317 return IRQ_HANDLED;
318}
319#endif
320
321/**
322 * Return a string representing the system type
323 *
324 * Returns System type string
325 */
326const char *octeon_board_type_string(void)
327{
328 static char name[80];
329 sprintf(name, "%s (%s)",
330 cvmx_board_type_to_string(octeon_bootinfo->board_type),
331 octeon_model_get_string(read_c0_prid()));
332 return name;
333}
334
335const char *get_system_type(void)
336 __attribute__ ((alias("octeon_board_type_string")));
337
338void octeon_user_io_init(void)
339{
340 union octeon_cvmemctl cvmmemctl;
341 union cvmx_iob_fau_timeout fau_timeout;
342 union cvmx_pow_nw_tim nm_tim;
343 uint64_t cvmctl;
344
345 /* Get the current settings for CP0_CVMMEMCTL_REG */
346 cvmmemctl.u64 = read_c0_cvmmemctl();
347 /* R/W If set, marked write-buffer entries time out the same
348	 * as other entries; if clear, marked write-buffer entries
349 * use the maximum timeout. */
350 cvmmemctl.s.dismarkwblongto = 1;
351 /* R/W If set, a merged store does not clear the write-buffer
352 * entry timeout state. */
353 cvmmemctl.s.dismrgclrwbto = 0;
354 /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
355 * word location for an IOBDMA. The other 8 bits come from the
356 * SCRADDR field of the IOBDMA. */
357 cvmmemctl.s.iobdmascrmsb = 0;
358 /* R/W If set, SYNCWS and SYNCS only order marked stores; if
359 * clear, SYNCWS and SYNCS only order unmarked
360 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
361 * set. */
362 cvmmemctl.s.syncwsmarked = 0;
363 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
364 cvmmemctl.s.dissyncws = 0;
365 /* R/W If set, no stall happens on write buffer full. */
366 if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
367 cvmmemctl.s.diswbfst = 1;
368 else
369 cvmmemctl.s.diswbfst = 0;
370 /* R/W If set (and SX set), supervisor-level loads/stores can
371 * use XKPHYS addresses with <48>==0 */
372 cvmmemctl.s.xkmemenas = 0;
373
374 /* R/W If set (and UX set), user-level loads/stores can use
375 * XKPHYS addresses with VA<48>==0 */
376 cvmmemctl.s.xkmemenau = 0;
377
378 /* R/W If set (and SX set), supervisor-level loads/stores can
379 * use XKPHYS addresses with VA<48>==1 */
380 cvmmemctl.s.xkioenas = 0;
381
382 /* R/W If set (and UX set), user-level loads/stores can use
383 * XKPHYS addresses with VA<48>==1 */
384 cvmmemctl.s.xkioenau = 0;
385
386 /* R/W If set, all stores act as SYNCW (NOMERGE must be set
387 * when this is set) RW, reset to 0. */
388 cvmmemctl.s.allsyncw = 0;
389
390 /* R/W If set, no stores merge, and all stores reach the
391 * coherent bus in order. */
392 cvmmemctl.s.nomerge = 0;
393	/* R/W Selects the bit in the counter used for DID time-outs:
394	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. The actual time-out is
395	 * between 1x and 2x this interval. For example, with
396	 * DIDTTO=3, the expiration interval is between 16K and 32K. */
397 cvmmemctl.s.didtto = 0;
398 /* R/W If set, the (mem) CSR clock never turns off. */
399 cvmmemctl.s.csrckalwys = 0;
400 /* R/W If set, mclk never turns off. */
401 cvmmemctl.s.mclkalwys = 0;
402	/* R/W Selects the bit in the counter used for write-buffer
403	 * flush time-outs; (WBFLT+11) is the bit position in an
404 * internal counter used to determine expiration. The write
405 * buffer expires between 1x and 2x this interval. For
406 * example, with WBFLT = 0, a write buffer expires between 2K
407 * and 4K cycles after the write buffer entry is allocated. */
408 cvmmemctl.s.wbfltime = 0;
409 /* R/W If set, do not put Istream in the L2 cache. */
410 cvmmemctl.s.istrnol2 = 0;
411 /* R/W The write buffer threshold. */
412 cvmmemctl.s.wbthresh = 10;
413 /* R/W If set, CVMSEG is available for loads/stores in
414 * kernel/debug mode. */
415#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
416 cvmmemctl.s.cvmsegenak = 1;
417#else
418 cvmmemctl.s.cvmsegenak = 0;
419#endif
420 /* R/W If set, CVMSEG is available for loads/stores in
421 * supervisor mode. */
422 cvmmemctl.s.cvmsegenas = 0;
423 /* R/W If set, CVMSEG is available for loads/stores in user
424 * mode. */
425 cvmmemctl.s.cvmsegenau = 0;
426 /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
427 * is max legal value. */
428 cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
429
430
431 if (smp_processor_id() == 0)
432 pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
433 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
434 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
435
436 write_c0_cvmmemctl(cvmmemctl.u64);
437
438 /* Move the performance counter interrupts to IRQ 6 */
439 cvmctl = read_c0_cvmctl();
440 cvmctl &= ~(7 << 7);
441 cvmctl |= 6 << 7;
442 write_c0_cvmctl(cvmctl);
443
444 /* Set a default for the hardware timeouts */
445 fau_timeout.u64 = 0;
446 fau_timeout.s.tout_val = 0xfff;
447 /* Disable tagwait FAU timeout */
448 fau_timeout.s.tout_enb = 0;
449 cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
450
451 nm_tim.u64 = 0;
452 /* 4096 cycles */
453 nm_tim.s.nw_tim = 3;
454 cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
455
456 write_octeon_c0_icacheerr(0);
457 write_c0_derraddr1(0);
458}
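Since lmemsz is counted in 128-byte cache lines, the CVMSEG byte size printed above is simply lines * 128; a one-line check (the 54-line maximum matches the "6912 bytes" noted in the comment):

#include <stdio.h>

int main(void)
{
	int lines = 54;				/* maximum legal lmemsz value */

	printf("%d bytes\n", lines * 128);	/* 6912 */
	return 0;
}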
459
460/**
461 * Early entry point for arch setup
462 */
463void __init prom_init(void)
464{
465 struct cvmx_sysinfo *sysinfo;
466 const int coreid = cvmx_get_core_num();
467 int i;
468 int argc;
469 struct uart_port octeon_port;
470#ifdef CONFIG_CAVIUM_RESERVE32
471 int64_t addr = -1;
472#endif
473 /*
474 * The bootloader passes a pointer to the boot descriptor in
475 * $a3, this is available as fw_arg3.
476 */
477 octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
478 octeon_bootinfo =
479 cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
480 cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
481
482 /*
483 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
484 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
485 */
486 if (!octeon_is_simulation() &&
487 octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
488 cvmx_write_csr(CVMX_LED_EN, 0);
489 cvmx_write_csr(CVMX_LED_PRT, 0);
490 cvmx_write_csr(CVMX_LED_DBG, 0);
491 cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
492 cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
493 cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
494 cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
495 cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
496 cvmx_write_csr(CVMX_LED_EN, 1);
497 }
498#ifdef CONFIG_CAVIUM_RESERVE32
499 /*
500 * We need to temporarily allocate all memory in the reserve32
501 * region. This makes sure the kernel doesn't allocate this
502 * memory when it is getting memory from the
503 * bootloader. Later, after the memory allocations are
504 * complete, the reserve32 will be freed.
505 */
506#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
507 if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
508 pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
509 "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
510 "is set\n");
511 else
512 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
513 0, 0, 512 << 20,
514 "CAVIUM_RESERVE32", 0);
515#else
516 /*
517 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
518 * is in case we later use hugetlb entries with it.
519 */
520 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
521 0, 0, 2 << 20,
522 "CAVIUM_RESERVE32", 0);
523#endif
524 if (addr < 0)
525 pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
526 else
527 octeon_reserve32_memory = addr;
528#endif
529
530#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
531 if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
532 pr_info("Skipping L2 locking due to reduced L2 cache size\n");
533 } else {
534 uint32_t ebase = read_c0_ebase() & 0x3ffff000;
535#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
536 /* TLB refill */
537 cvmx_l2c_lock_mem_region(ebase, 0x100);
538#endif
539#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
540 /* General exception */
541 cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
542#endif
543#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
544 /* Interrupt handler */
545 cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
546#endif
547#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
548 cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
549 cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
550#endif
551#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
552 cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
553#endif
554 }
555#endif
556
557 sysinfo = cvmx_sysinfo_get();
558 memset(sysinfo, 0, sizeof(*sysinfo));
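	/* dram_size from the bootloader is in megabytes */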
559 sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
560 sysinfo->phy_mem_desc_ptr =
561 cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
562 sysinfo->core_mask = octeon_bootinfo->core_mask;
563 sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
564 sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
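	/* DDR memory: the data rate is twice the DRAM clock */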
565 sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
566 sysinfo->board_type = octeon_bootinfo->board_type;
567 sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
568 sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
569 memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
570 sizeof(sysinfo->mac_addr_base));
571 sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
572 memcpy(sysinfo->board_serial_number,
573 octeon_bootinfo->board_serial_number,
574 sizeof(sysinfo->board_serial_number));
575 sysinfo->compact_flash_common_base_addr =
576 octeon_bootinfo->compact_flash_common_base_addr;
577 sysinfo->compact_flash_attribute_base_addr =
578 octeon_bootinfo->compact_flash_attribute_base_addr;
579 sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
580 sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
581 sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
582
583
584 octeon_check_cpu_bist();
585
586 octeon_uart = octeon_get_boot_uart();
587
588 /*
589	 * Disable all CIU interrupts. The ones we need will be
590 * enabled later. Read the SUM register so we know the write
591 * completed.
592 */
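	/* Each core has two CIU enable pairs: index (coreid * 2) feeds the
	 * IP2 interrupt line and (coreid * 2 + 1) feeds IP3. */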
593 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
594 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
595 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
596 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
597 cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
598
599#ifdef CONFIG_SMP
600 octeon_write_lcd("LinuxSMP");
601#else
602 octeon_write_lcd("Linux");
603#endif
604
605#ifdef CONFIG_CAVIUM_GDB
606 /*
607	 * When debugging the Linux kernel, force the cores to enter
608 * the debug exception handler to break in.
609 */
610 if (octeon_get_boot_debug_flag()) {
611 cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
612 cvmx_read_csr(CVMX_CIU_DINT);
613 }
614#endif
615
616 /*
617 * BIST should always be enabled when doing a soft reset. L2
618	 * cache locking, for instance, is not cleared unless BIST is
619	 * enabled. Unfortunately, due to chip erratum G-200 on the
620	 * CN38XX and CN31XX, BIST must be disabled on these parts.
621 */
622 if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
623 OCTEON_IS_MODEL(OCTEON_CN31XX))
624 cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
625 else
626 cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
627
628 /* Default to 64MB in the simulator to speed things up */
629 if (octeon_is_simulation())
630 MAX_MEMORY = 64ull << 20;
631
632 arcs_cmdline[0] = 0;
633 argc = octeon_boot_desc_ptr->argc;
634 for (i = 0; i < argc; i++) {
635 const char *arg =
636 cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
637 if ((strncmp(arg, "MEM=", 4) == 0) ||
638 (strncmp(arg, "mem=", 4) == 0)) {
639 sscanf(arg + 4, "%llu", &MAX_MEMORY);
640 MAX_MEMORY <<= 20;
641 if (MAX_MEMORY == 0)
642 MAX_MEMORY = 32ull << 30;
643 } else if (strcmp(arg, "ecc_verbose") == 0) {
644#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
645 __cvmx_interrupt_ecc_report_single_bit_errors = 1;
646 pr_notice("Reporting of single bit ECC errors is "
647 "turned on\n");
648#endif
649 } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
650 sizeof(arcs_cmdline) - 1) {
651 strcat(arcs_cmdline, " ");
652 strcat(arcs_cmdline, arg);
653 }
654 }
655
656 if (strstr(arcs_cmdline, "console=") == NULL) {
657#ifdef CONFIG_GDB_CONSOLE
658 strcat(arcs_cmdline, " console=gdb");
659#else
660#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
661 strcat(arcs_cmdline, " console=ttyS0,115200");
662#else
663 if (octeon_uart == 1)
664 strcat(arcs_cmdline, " console=ttyS1,115200");
665 else
666 strcat(arcs_cmdline, " console=ttyS0,115200");
667#endif
668#endif
669 }
670
671 if (octeon_is_simulation()) {
672 /*
673	 * The simulator uses an mtdram device pre-filled with the
674	 * filesystem. Also specify the delay calibration (lpj)
675	 * to avoid recalculating it on every boot.
676 */
677 strcat(arcs_cmdline, " rw root=1f00"
678 " lpj=60176 slram=root,0x40000000,+1073741824");
679 }
680
681 mips_hpt_frequency = octeon_get_clock_rate();
682
683 octeon_init_cvmcount();
684
685 _machine_restart = octeon_restart;
686 _machine_halt = octeon_halt;
687
688 memset(&octeon_port, 0, sizeof(octeon_port));
689 /*
690 * For early_serial_setup we don't set the port type or
691 * UPF_FIXED_TYPE.
692 */
693 octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
694 octeon_port.iotype = UPIO_MEM;
695 /* I/O addresses are every 8 bytes */
696 octeon_port.regshift = 3;
697 /* Clock rate of the chip */
698 octeon_port.uartclk = mips_hpt_frequency;
699 octeon_port.fifosize = 64;
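	/* MIO UART0 CSRs start at physical 0x0001180000000800; each UART
	 * occupies a 1024-byte register window. */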
700 octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
701 octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
702 octeon_port.serial_in = octeon_serial_in;
703 octeon_port.serial_out = octeon_serial_out;
704#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
705 octeon_port.line = 0;
706#else
707 octeon_port.line = octeon_uart;
708#endif
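	/* CIU bit 34 plus the 8 MIPS core IRQs gives IRQ 42 for UART0;
	 * the UART1 line follows it. */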
709 octeon_port.irq = 42 + octeon_uart;
710 early_serial_setup(&octeon_port);
711
712 octeon_user_io_init();
713 register_smp_ops(&octeon_smp_ops);
714}
715
716void __init plat_mem_setup(void)
717{
718 uint64_t mem_alloc_size;
719 uint64_t total;
720 int64_t memory;
721
722 total = 0;
723
724 /* First add the init memory we will be returning. */
725 memory = __pa_symbol(&__init_begin) & PAGE_MASK;
726 mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
727 if (mem_alloc_size > 0) {
728 add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
729 total += mem_alloc_size;
730 }
731
732 /*
733	 * The MIPS memory init uses the first memory location for
734	 * some memory vectors. When SPARSEMEM is in use, it doesn't
735	 * verify that the size is big enough for the final
736	 * vectors. Making the smallest chunk 4MB seems to be enough
737	 * to work consistently.
738 */
739 mem_alloc_size = 4 << 20;
740 if (mem_alloc_size > MAX_MEMORY)
741 mem_alloc_size = MAX_MEMORY;
742
743 /*
744 * When allocating memory, we want incrementing addresses from
745 * bootmem_alloc so the code in add_memory_region can merge
746 * regions next to each other.
747 */
748 cvmx_bootmem_lock();
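	/* The per-config address limits below: 64-bit kernels may allocate
	 * anywhere above the kernel image, 32-bit HIGHMEM kernels must stay
	 * below 2GB, and plain 32-bit kernels below the 512MB that KSEG0
	 * can map. */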
749 while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
750 && (total < MAX_MEMORY)) {
751#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
752 memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
753 __pa_symbol(&__init_end), -1,
754 0x100000,
755 CVMX_BOOTMEM_FLAG_NO_LOCKING);
756#elif defined(CONFIG_HIGHMEM)
757 memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
758 0x100000,
759 CVMX_BOOTMEM_FLAG_NO_LOCKING);
760#else
761 memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
762 0x100000,
763 CVMX_BOOTMEM_FLAG_NO_LOCKING);
764#endif
765 if (memory >= 0) {
766 /*
767 * This function automatically merges address
768 * regions next to each other if they are
769 * received in incrementing order.
770 */
771 add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
772 total += mem_alloc_size;
773 } else {
774 break;
775 }
776 }
777 cvmx_bootmem_unlock();
778
779#ifdef CONFIG_CAVIUM_RESERVE32
780 /*
781 * Now that we've allocated the kernel memory it is safe to
782 * free the reserved region. We free it here so that builtin
783 * drivers can use the memory.
784 */
785 if (octeon_reserve32_memory)
786 cvmx_bootmem_free_named("CAVIUM_RESERVE32");
787#endif /* CONFIG_CAVIUM_RESERVE32 */
788
789 if (total == 0)
790 panic("Unable to allocate memory from "
791 "cvmx_bootmem_phy_alloc\n");
792}
793
794
795int prom_putchar(char c)
796{
797 uint64_t lsrval;
798
799	/* Spin until the transmitter is ready (LSR bit 5, THR empty) */
800 do {
801 lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
802 } while ((lsrval & 0x20) == 0);
803
804 /* Write the byte */
805 cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
806 return 1;
807}
808
809void prom_free_prom_memory(void)
810{
811#ifdef CONFIG_CAVIUM_DECODE_RSL
812 cvmx_interrupt_rsl_enable();
813
814 /* Add an interrupt handler for general failures. */
815 if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
816 "RML/RSL", octeon_rlm_interrupt)) {
817 panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
818 }
819#endif
820
821	/* This call is here so that it is performed after any TLB
822	 * initializations. It needs to be after these in case the
823	 * CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set. */
824 octeon_hal_setup_reserved32();
825}
826
827static struct octeon_cf_data octeon_cf_data;
828
829static int __init octeon_cf_device_init(void)
830{
831 union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
832 unsigned long base_ptr, region_base, region_size;
833 struct platform_device *pd;
834 struct resource cf_resources[3];
835 unsigned int num_resources;
836 int i;
837 int ret = 0;
838
839 /* Setup octeon-cf platform device if present. */
840 base_ptr = 0;
841 if (octeon_bootinfo->major_version == 1
842 && octeon_bootinfo->minor_version >= 1) {
843 if (octeon_bootinfo->compact_flash_common_base_addr)
844 base_ptr =
845 octeon_bootinfo->compact_flash_common_base_addr;
846 } else {
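		/* Older bootloaders (before 1.1) did not pass the CF base
		 * address; fall back to the traditional fixed boot-bus
		 * address. */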
847 base_ptr = 0x1d000800;
848 }
849
850 if (!base_ptr)
851 return ret;
852
853 /* Find CS0 region. */
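	/* The base and size fields of MIO_BOOT_REG_CFG are in 64KB units. */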
854 for (i = 0; i < 8; i++) {
855 mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
856 region_base = mio_boot_reg_cfg.s.base << 16;
857 region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
858 if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
859 && base_ptr < region_base + region_size)
860 break;
861 }
862 if (i >= 7) {
863 /* i and i + 1 are CS0 and CS1, both must be less than 8. */
864 goto out;
865 }
866 octeon_cf_data.base_region = i;
867 octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
868 octeon_cf_data.base_region_bias = base_ptr - region_base;
869 memset(cf_resources, 0, sizeof(cf_resources));
870 num_resources = 0;
871 cf_resources[num_resources].flags = IORESOURCE_MEM;
872 cf_resources[num_resources].start = region_base;
873 cf_resources[num_resources].end = region_base + region_size - 1;
874 num_resources++;
875
876
877 if (!(base_ptr & 0xfffful)) {
878 /*
879 * Boot loader signals availability of DMA (true_ide
880 * mode) by setting low order bits of base_ptr to
881 * zero.
882 */
883
884	/* Assume that CS1 immediately follows. */
885 mio_boot_reg_cfg.u64 =
886 cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
887 region_base = mio_boot_reg_cfg.s.base << 16;
888 region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
889 if (!mio_boot_reg_cfg.s.en)
890 goto out;
891
892 cf_resources[num_resources].flags = IORESOURCE_MEM;
893 cf_resources[num_resources].start = region_base;
894 cf_resources[num_resources].end = region_base + region_size - 1;
895 num_resources++;
896
897 octeon_cf_data.dma_engine = 0;
898 cf_resources[num_resources].flags = IORESOURCE_IRQ;
899 cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
900 cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
901 num_resources++;
902 } else {
903 octeon_cf_data.dma_engine = -1;
904 }
905
906 pd = platform_device_alloc("pata_octeon_cf", -1);
907 if (!pd) {
908 ret = -ENOMEM;
909 goto out;
910 }
911 pd->dev.platform_data = &octeon_cf_data;
912
913 ret = platform_device_add_resources(pd, cf_resources, num_resources);
914 if (ret)
915 goto fail;
916
917 ret = platform_device_add(pd);
918 if (ret)
919 goto fail;
920
921 return ret;
922fail:
923 platform_device_put(pd);
924out:
925 return ret;
926}
927device_initcall(octeon_cf_device_init);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 000000000000..24e0ad63980a
--- /dev/null
+++ b/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,211 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2008 Cavium Networks
7 */
8#include <linux/init.h>
9#include <linux/delay.h>
10#include <linux/smp.h>
11#include <linux/interrupt.h>
12#include <linux/kernel_stat.h>
13#include <linux/sched.h>
14#include <linux/module.h>
15
16#include <asm/mmu_context.h>
17#include <asm/system.h>
18#include <asm/time.h>
19
20#include <asm/octeon/octeon.h>
21
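/*
 * Boot handshake with the secondary cores: the master publishes the
 * new core's stack and global pointers, then its core id in
 * octeon_processor_boot (0xff means no core is being booted); the
 * secondary clears octeon_processor_sp once it has read them.
 */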
22volatile unsigned long octeon_processor_boot = 0xff;
23volatile unsigned long octeon_processor_sp;
24volatile unsigned long octeon_processor_gp;
25
26static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
27{
28 const int coreid = cvmx_get_core_num();
29 uint64_t action;
30
31 /* Load the mailbox register to figure out what we're supposed to do */
32 action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));
33
34 /* Clear the mailbox to clear the interrupt */
35 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
36
37 if (action & SMP_CALL_FUNCTION)
38 smp_call_function_interrupt();
39
40	/* Flush the icache if requested; on Octeon one synci invalidates it all */
41 if (action & SMP_ICACHE_FLUSH)
42 asm volatile ("synci 0($0)\n");
43 return IRQ_HANDLED;
44}
45
46/**
47 * Send an inter-processor interrupt to the given cpu by setting the
48 * requested action bits in that core's mailbox; its mailbox interrupt
49 * handler will act on them.
50 */
51void octeon_send_ipi_single(int cpu, unsigned int action)
52{
53 int coreid = cpu_logical_map(cpu);
54 /*
55 pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
56 coreid, action);
57 */
58 cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
59}
60
61static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action)
62{
63 unsigned int i;
64
65 for_each_cpu_mask(i, mask)
66 octeon_send_ipi_single(i, action);
67}
68
69/**
70 * Detect available CPUs and populate cpu_possible_map
71 */
72static void octeon_smp_setup(void)
73{
74 const int coreid = cvmx_get_core_num();
75 int cpus;
76 int id;
77
78 int core_mask = octeon_get_boot_coremask();
79
80 cpus_clear(cpu_possible_map);
81 __cpu_number_map[coreid] = 0;
82 __cpu_logical_map[0] = coreid;
83 cpu_set(0, cpu_possible_map);
84
85 cpus = 1;
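	/* Check all 16 possible core ids; the largest OCTEON parts of this
	 * generation have 16 cores. */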
86 for (id = 0; id < 16; id++) {
87 if ((id != coreid) && (core_mask & (1 << id))) {
88 cpu_set(cpus, cpu_possible_map);
89 __cpu_number_map[id] = cpus;
90 __cpu_logical_map[cpus] = id;
91 cpus++;
92 }
93 }
94}
95
96/**
97 * Firmware CPU startup hook
98 *
99 */
100static void octeon_boot_secondary(int cpu, struct task_struct *idle)
101{
102 int count;
103
104 pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
105 cpu_logical_map(cpu));
106
107 octeon_processor_sp = __KSTK_TOS(idle);
108 octeon_processor_gp = (unsigned long)(task_thread_info(idle));
109 octeon_processor_boot = cpu_logical_map(cpu);
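	/* Make the sp/gp/boot stores visible to the spinning secondary
	 * before we start polling for its acknowledgement. */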
110 mb();
111
112 count = 10000;
113 while (octeon_processor_sp && count) {
114 /* Waiting for processor to get the SP and GP */
115 udelay(1);
116 count--;
117 }
118 if (count == 0)
119 pr_err("Secondary boot timeout\n");
120}
121
122/**
123 * After we've done initial boot, this function is called to allow the
124 * board code to clean up state, if needed
125 */
126static void octeon_init_secondary(void)
127{
128 const int coreid = cvmx_get_core_num();
129 union cvmx_ciu_intx_sum0 interrupt_enable;
130
131 octeon_check_cpu_bist();
132 octeon_init_cvmcount();
133 /*
134 pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid);
135 */
136	/* Enable mailbox interrupts to this core. These are the only
137	 * interrupts allowed on line 3. */
138 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
139 interrupt_enable.u64 = 0;
140 interrupt_enable.s.mbox = 0x3;
141 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
142 cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
143 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
144 cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
145	/* Enable interrupt lines 2, 3 and 7 (0x8c01 = IE | IM2 | IM3 | IM7) */
146 set_c0_status(0x8c01);
147}
148
149/**
150 * Callout to firmware before smp_init
151 *
152 */
153void octeon_prepare_cpus(unsigned int max_cpus)
154{
155 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
156 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED,
157 "mailbox0", mailbox_interrupt)) {
158 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
159 }
160 if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED,
161 "mailbox1", mailbox_interrupt)) {
162 panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
163 }
164}
165
166/**
167 * Last chance for the board code to finish SMP initialization before
168 * the CPU is "online".
169 */
170static void octeon_smp_finish(void)
171{
172#ifdef CONFIG_CAVIUM_GDB
173 unsigned long tmp;
174	/* Pulse the MCD0 signal on Ctrl-C to stop all the cores. Also
175	 * unmask MCD0 on this core so we know the signal was received
176	 * by someone. */
177 asm volatile ("dmfc0 %0, $22\n"
178 "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
179#endif
180
181 octeon_user_io_init();
182
183	/* Program Compare ahead of Count to raise the first CPU timer interrupt */
184 write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
185}
186
187/**
188 * Hook for after all CPUs are online
189 */
190static void octeon_cpus_done(void)
191{
192#ifdef CONFIG_CAVIUM_GDB
193 unsigned long tmp;
194	/* Pulse the MCD0 signal on Ctrl-C to stop all the cores. Also
195	 * unmask MCD0 on this core so we know the signal was received
196	 * by someone. */
197 asm volatile ("dmfc0 %0, $22\n"
198 "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
199#endif
200}
201
202struct plat_smp_ops octeon_smp_ops = {
203 .send_ipi_single = octeon_send_ipi_single,
204 .send_ipi_mask = octeon_send_ipi_mask,
205 .init_secondary = octeon_init_secondary,
206 .smp_finish = octeon_smp_finish,
207 .cpus_done = octeon_cpus_done,
208 .boot_secondary = octeon_boot_secondary,
209 .smp_setup = octeon_smp_setup,
210 .prepare_cpus = octeon_prepare_cpus,
211};