author    David Daney <ddaney@caviumnetworks.com>  2008-12-23 18:22:14 -0500
committer Ralf Baechle <ralf@linux-mips.org>       2009-01-11 04:57:20 -0500
commit    58f07778ce9d32c22cecb1d8ef348001f0e705c9
tree      8b4462addd9058cbdf8f6085169f48c318fe7478 /arch/mips/cavium-octeon
parent    54293ec3074a5fe61abd297502f68b2529a3dab3
MIPS: Add Cavium OCTEON processor support files to arch/mips/cavium-octeon/executive and asm/octeon.
These files are used to coordinate resource sharing between all of the
programs running on the OCTEON SOC. The OCTEON processor has many CPU
cores (current parts have up to 16, but more are possible). It also has
a variety of on-chip hardware blocks for things like network
acceleration, encryption and RAID.

One typical configuration is to run Linux on several of the CPU cores,
and other dedicated applications on the other cores. Resource
allocation between the various programs running on the system (the
Linux kernel and the other dedicated applications) needs to be
coordinated. We call the code that does this the 'executive'. All of
this resource allocation and sharing code is gathered together in the
executive directory.

Included in the patch set are the following files:

  cvmx-bootmem.c and cvmx-sysinfo.c -- Coordinate memory allocation.
      All memory used by the Linux kernel is obtained here at boot time.

  cvmx-l2c.c -- Coordinates operations on the shared level 2 cache.

  octeon-model.c -- Probes chip capabilities and version.

The corresponding headers are in asm/octeon.

Signed-off-by: Tomaso Paoletti <tpaoletti@caviumnetworks.com>
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

create mode 100644 arch/mips/cavium-octeon/executive/Makefile
create mode 100644 arch/mips/cavium-octeon/executive/cvmx-bootmem.c
create mode 100644 arch/mips/cavium-octeon/executive/cvmx-l2c.c
create mode 100644 arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
create mode 100644 arch/mips/cavium-octeon/executive/octeon-model.c
create mode 100644 arch/mips/include/asm/octeon/cvmx-asm.h
create mode 100644 arch/mips/include/asm/octeon/cvmx-bootinfo.h
create mode 100644 arch/mips/include/asm/octeon/cvmx-bootmem.h
create mode 100644 arch/mips/include/asm/octeon/cvmx-l2c.h
create mode 100644 arch/mips/include/asm/octeon/cvmx-packet.h
create mode 100644 arch/mips/include/asm/octeon/cvmx-spinlock.h
create mode 100644 arch/mips/include/asm/octeon/cvmx-sysinfo.h
create mode 100644 arch/mips/include/asm/octeon/cvmx.h
create mode 100644 arch/mips/include/asm/octeon/octeon-feature.h
create mode 100644 arch/mips/include/asm/octeon/octeon-model.h
Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--  arch/mips/cavium-octeon/executive/Makefile        |  13
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-bootmem.c  | 586
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c      | 734
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-sysinfo.c  | 116
-rw-r--r--  arch/mips/cavium-octeon/executive/octeon-model.c  | 358
5 files changed, 1807 insertions(+), 0 deletions(-)
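As a hedged illustration of the coordination described in the commit message (this sketch is not part of the patch), the snippet below shows how the Linux side might look up a region that another program on the SOC has published through the bootmem named-block interface added in cvmx-bootmem.c. The block name "shared-stats" is invented for the example; cvmx_bootmem_find_named_block(), cvmx_phys_to_ptr() and the base_addr field come from the code in this patch.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>

static void *example_find_shared_region(void)
{
	struct cvmx_bootmem_named_block_desc *blk;

	/* Look up the block by name; returns NULL if it is not published. */
	blk = cvmx_bootmem_find_named_block("shared-stats");
	if (!blk)
		return NULL;

	/* Convert the 64 bit physical base into a usable pointer. */
	return cvmx_phys_to_ptr(blk->base_addr);
}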
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 000000000000..80d6cb26766b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the Cavium Octeon specific kernel interface routines
3# under Linux.
4#
5# This file is subject to the terms and conditions of the GNU General Public
6# License. See the file "COPYING" in the main directory of this archive
7# for more details.
8#
9# Copyright (C) 2005-2008 Cavium Networks
10#
11
12obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
13
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 000000000000..4f5a08b37ccd
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,586 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Simple allocate only memory allocator. Used to allocate memory at
30 * application start time.
31 */
32
33#include <linux/kernel.h>
34
35#include <asm/octeon/cvmx.h>
36#include <asm/octeon/cvmx-spinlock.h>
37#include <asm/octeon/cvmx-bootmem.h>
38
39/*#define DEBUG */
40
41
42static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
43
44/* See header file for descriptions of functions */
45
46/*
47 * Wrapper functions are provided for reading/writing the size and
48 * next block values, as these may not be directly addressable (in 32
49 * bit applications, for instance). The offsets of data elements in the
50 * bootmem list must match cvmx_bootmem_block_header_t.
51 */
52#define NEXT_OFFSET 0
53#define SIZE_OFFSET 8
54
55static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
56{
57 cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
58}
59
60static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
61{
62 cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
63}
64
65static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
66{
67 return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
68}
69
70static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
71{
72 return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
73}
74
75void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
76 uint64_t min_addr, uint64_t max_addr)
77{
78 int64_t address;
79 address =
80 cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
81
82 if (address > 0)
83 return cvmx_phys_to_ptr(address);
84 else
85 return NULL;
86}
87
88void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
89 uint64_t alignment)
90{
91 return cvmx_bootmem_alloc_range(size, alignment, address,
92 address + size);
93}
94
95void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
96{
97 return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
98}
99
100int cvmx_bootmem_free_named(char *name)
101{
102 return cvmx_bootmem_phy_named_block_free(name, 0);
103}
104
105struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
106{
107 return cvmx_bootmem_phy_named_block_find(name, 0);
108}
109
110void cvmx_bootmem_lock(void)
111{
112 cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
113}
114
115void cvmx_bootmem_unlock(void)
116{
117 cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
118}
119
120int cvmx_bootmem_init(void *mem_desc_ptr)
121{
122 /* Here we set the global pointer to the bootmem descriptor
123 * block. This pointer will be used directly, so we will set
124 * it up to be directly usable by the application. It is set
125 * up as follows for the various runtime/ABI combinations:
126 *
127 * Linux 64 bit: Set XKPHYS bit
128 * Linux 32 bit: use mmap to create mapping, use virtual address
129 * CVMX 64 bit: use physical address directly
130 * CVMX 32 bit: use physical address directly
131 *
132 * Note that the CVMX environment assumes the use of 1-1 TLB
133 * mappings so that the physical addresses can be used
134 * directly
135 */
136 if (!cvmx_bootmem_desc) {
137#if defined(CVMX_ABI_64)
138 /* Set XKPHYS bit */
139 cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
140#else
141 cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
142#endif
143 }
144
145 return 0;
146}
147
148/*
149 * The cvmx_bootmem_phy* functions below return 64 bit physical
150 * addresses, and expose more features than the cvmx_bootmem functions
151 * above. These are required for full memory space access in 32 bit
152 * applications, as well as for using some advanced features. Most
153 * applications should not need to use these.
154 */
155
156int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
157 uint64_t address_max, uint64_t alignment,
158 uint32_t flags)
159{
160
161 uint64_t head_addr;
162 uint64_t ent_addr;
163 /* Previous list entry; 0 when the current entry is the head of the list */
164 uint64_t prev_addr = 0;
165 uint64_t new_ent_addr = 0;
166 uint64_t desired_min_addr;
167
168#ifdef DEBUG
169 cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
170 "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
171 (unsigned long long)req_size,
172 (unsigned long long)address_min,
173 (unsigned long long)address_max,
174 (unsigned long long)alignment);
175#endif
176
177 if (cvmx_bootmem_desc->major_version > 3) {
178 cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
179 "version: %d.%d at addr: %p\n",
180 (int)cvmx_bootmem_desc->major_version,
181 (int)cvmx_bootmem_desc->minor_version,
182 cvmx_bootmem_desc);
183 goto error_out;
184 }
185
186 /*
187 * Do a variety of checks to validate the arguments. The
188 * allocator code will later assume that these checks have
189 * been made. We validate that the requested constraints are
190 * not self-contradictory before we look through the list of
191 * available memory.
192 */
193
194 /* 0 is not a valid req_size for this allocator */
195 if (!req_size)
196 goto error_out;
197
198 /* Round req_size up to mult of minimum alignment bytes */
199 req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
200 ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
201
202 /*
203 * Convert !0 address_min and 0 address_max to the special case of
204 * a range that specifies an exact memory block to allocate. Do
205 * this before other checks and adjustments so that this
206 * transformation will be validated.
207 */
208 if (address_min && !address_max)
209 address_max = address_min + req_size;
210 else if (!address_min && !address_max)
211 address_max = ~0ull; /* If no limits given, use max limits */
212
213
214 /*
215 * Enforce the minimum alignment (this also keeps the minimum free
216 * block size the same as the alignment size).
217 */
218 if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
219 alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
220
221 /*
222 * Adjust address minimum based on requested alignment (round
223 * up to meet alignment). Do this here so we can reject
224 * impossible requests up front. (NOP for address_min == 0)
225 */
226 if (alignment)
227 address_min = __ALIGN_MASK(address_min, (alignment - 1));
228
229 /*
230 * Reject inconsistent args. We have adjusted these, so this
231 * may fail due to our internal changes even if this check
232 * would pass for the values the user supplied.
233 */
234 if (req_size > address_max - address_min)
235 goto error_out;
236
237 /* Walk through the list entries - first fit found is returned */
238
239 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
240 cvmx_bootmem_lock();
241 head_addr = cvmx_bootmem_desc->head_addr;
242 ent_addr = head_addr;
243 for (; ent_addr;
244 prev_addr = ent_addr,
245 ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
246 uint64_t usable_base, usable_max;
247 uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
248
249 if (cvmx_bootmem_phy_get_next(ent_addr)
250 && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
251 cvmx_dprintf("Internal bootmem_alloc() error: ent: "
252 "0x%llx, next: 0x%llx\n",
253 (unsigned long long)ent_addr,
254 (unsigned long long)
255 cvmx_bootmem_phy_get_next(ent_addr));
256 goto error_out;
257 }
258
259 /*
260 * Determine if this is an entry that can satisfy the
261 * request. Check to make sure the entry is large enough to
262 * satisfy the request.
263 */
264 usable_base =
265 __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
266 usable_max = min(address_max, ent_addr + ent_size);
267 /*
268 * We should be able to allocate block at address
269 * usable_base.
270 */
271
272 desired_min_addr = usable_base;
273 /*
274 * Determine if request can be satisfied from the
275 * current entry.
276 */
277 if (!((ent_addr + ent_size) > usable_base
278 && ent_addr < address_max
279 && req_size <= usable_max - usable_base))
280 continue;
281 /*
282 * We have found an entry that has room to satisfy the
283 * request, so allocate it from this entry. If
284 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
285 * the end of this block rather than the beginning.
286 */
287 if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
288 desired_min_addr = usable_max - req_size;
289 /*
290 * Align desired address down to required
291 * alignment.
292 */
293 desired_min_addr &= ~(alignment - 1);
294 }
295
296 /* Match at start of entry */
297 if (desired_min_addr == ent_addr) {
298 if (req_size < ent_size) {
299 /*
300 * The entry is big enough to create a new
301 * block from its top portion.
302 */
303 new_ent_addr = ent_addr + req_size;
304 cvmx_bootmem_phy_set_next(new_ent_addr,
305 cvmx_bootmem_phy_get_next(ent_addr));
306 cvmx_bootmem_phy_set_size(new_ent_addr,
307 ent_size -
308 req_size);
309
310 /*
311 * Adjust next pointer as following
312 * code uses this.
313 */
314 cvmx_bootmem_phy_set_next(ent_addr,
315 new_ent_addr);
316 }
317
318 /*
319 * adjust prev ptr or head to remove this
320 * entry from list.
321 */
322 if (prev_addr)
323 cvmx_bootmem_phy_set_next(prev_addr,
324 cvmx_bootmem_phy_get_next(ent_addr));
325 else
326 /*
327 * head of list being returned, so
328 * update head ptr.
329 */
330 cvmx_bootmem_desc->head_addr =
331 cvmx_bootmem_phy_get_next(ent_addr);
332
333 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
334 cvmx_bootmem_unlock();
335 return desired_min_addr;
336 }
337 /*
338 * The block being returned doesn't start at the beginning
339 * of this entry, so we will be splitting this entry in
340 * two. The low portion stays on the list as a (smaller)
341 * free block, and a new free block is created from the
342 * high portion of the entry so that it starts at the
343 * desired address.
344 *
345 * The new block is linked in after the current entry, and
346 * the loop runs again to do the actual alloc from it.
347 */
348 new_ent_addr = desired_min_addr;
349 cvmx_bootmem_phy_set_next(new_ent_addr,
350 cvmx_bootmem_phy_get_next
351 (ent_addr));
352 cvmx_bootmem_phy_set_size(new_ent_addr,
353 cvmx_bootmem_phy_get_size
354 (ent_addr) -
355 (desired_min_addr -
356 ent_addr));
357 cvmx_bootmem_phy_set_size(ent_addr,
358 desired_min_addr - ent_addr);
359 cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
360 /* Loop again to handle actual alloc from new block */
361 }
362error_out:
363 /* We didn't find anything, so return error */
364 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
365 cvmx_bootmem_unlock();
366 return -1;
367}
368
369int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
370{
371 uint64_t cur_addr;
372 uint64_t prev_addr = 0; /* zero is invalid */
373 int retval = 0;
374
375#ifdef DEBUG
376 cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
377 (unsigned long long)phy_addr, (unsigned long long)size);
378#endif
379 if (cvmx_bootmem_desc->major_version > 3) {
380 cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
381 "version: %d.%d at addr: %p\n",
382 (int)cvmx_bootmem_desc->major_version,
383 (int)cvmx_bootmem_desc->minor_version,
384 cvmx_bootmem_desc);
385 return 0;
386 }
387
388 /* 0 is not a valid size for this allocator */
389 if (!size)
390 return 0;
391
392 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
393 cvmx_bootmem_lock();
394 cur_addr = cvmx_bootmem_desc->head_addr;
395 if (cur_addr == 0 || phy_addr < cur_addr) {
396 /* add at front of list - special case with changing head ptr */
397 if (cur_addr && phy_addr + size > cur_addr)
398 goto bootmem_free_done; /* error, overlapping section */
399 else if (phy_addr + size == cur_addr) {
400 /* Add to front of existing first block */
401 cvmx_bootmem_phy_set_next(phy_addr,
402 cvmx_bootmem_phy_get_next
403 (cur_addr));
404 cvmx_bootmem_phy_set_size(phy_addr,
405 cvmx_bootmem_phy_get_size
406 (cur_addr) + size);
407 cvmx_bootmem_desc->head_addr = phy_addr;
408
409 } else {
410 /* New block before first block. OK if cur_addr is 0 */
411 cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
412 cvmx_bootmem_phy_set_size(phy_addr, size);
413 cvmx_bootmem_desc->head_addr = phy_addr;
414 }
415 retval = 1;
416 goto bootmem_free_done;
417 }
418
419 /* Find place in list to add block */
420 while (cur_addr && phy_addr > cur_addr) {
421 prev_addr = cur_addr;
422 cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
423 }
424
425 if (!cur_addr) {
426 /*
427 * We have reached the end of the list; add to the end,
428 * checking to see if we need to combine with the last
429 * block.
430 */
431 if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
432 phy_addr) {
433 cvmx_bootmem_phy_set_size(prev_addr,
434 cvmx_bootmem_phy_get_size
435 (prev_addr) + size);
436 } else {
437 cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
438 cvmx_bootmem_phy_set_size(phy_addr, size);
439 cvmx_bootmem_phy_set_next(phy_addr, 0);
440 }
441 retval = 1;
442 goto bootmem_free_done;
443 } else {
444 /*
445 * insert between prev and cur nodes, checking for
446 * merge with either/both.
447 */
448 if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
449 phy_addr) {
450 /* Merge with previous */
451 cvmx_bootmem_phy_set_size(prev_addr,
452 cvmx_bootmem_phy_get_size
453 (prev_addr) + size);
454 if (phy_addr + size == cur_addr) {
455 /* Also merge with current */
456 cvmx_bootmem_phy_set_size(prev_addr,
457 cvmx_bootmem_phy_get_size(cur_addr) +
458 cvmx_bootmem_phy_get_size(prev_addr));
459 cvmx_bootmem_phy_set_next(prev_addr,
460 cvmx_bootmem_phy_get_next(cur_addr));
461 }
462 retval = 1;
463 goto bootmem_free_done;
464 } else if (phy_addr + size == cur_addr) {
465 /* Merge with current */
466 cvmx_bootmem_phy_set_size(phy_addr,
467 cvmx_bootmem_phy_get_size
468 (cur_addr) + size);
469 cvmx_bootmem_phy_set_next(phy_addr,
470 cvmx_bootmem_phy_get_next
471 (cur_addr));
472 cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
473 retval = 1;
474 goto bootmem_free_done;
475 }
476
477 /* It is a standalone block, add in between prev and cur */
478 cvmx_bootmem_phy_set_size(phy_addr, size);
479 cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
480 cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
481
482 }
483 retval = 1;
484
485bootmem_free_done:
486 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
487 cvmx_bootmem_unlock();
488 return retval;
489
490}
491
492struct cvmx_bootmem_named_block_desc *
493 cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
494{
495 unsigned int i;
496 struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
497
498#ifdef DEBUG
499 cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
500#endif
501 /*
502 * Lock the structure to make sure that it is not being
503 * changed while we are examining it.
504 */
505 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
506 cvmx_bootmem_lock();
507
508 /* Use XKPHYS for 64 bit linux */
509 named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
510 cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
511
512#ifdef DEBUG
513 cvmx_dprintf
514 ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
515 named_block_array_ptr);
516#endif
517 if (cvmx_bootmem_desc->major_version == 3) {
518 for (i = 0;
519 i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
520 if ((name && named_block_array_ptr[i].size
521 && !strncmp(name, named_block_array_ptr[i].name,
522 cvmx_bootmem_desc->named_block_name_len
523 - 1))
524 || (!name && !named_block_array_ptr[i].size)) {
525 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
526 cvmx_bootmem_unlock();
527
528 return &(named_block_array_ptr[i]);
529 }
530 }
531 } else {
532 cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
533 "version: %d.%d at addr: %p\n",
534 (int)cvmx_bootmem_desc->major_version,
535 (int)cvmx_bootmem_desc->minor_version,
536 cvmx_bootmem_desc);
537 }
538 if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
539 cvmx_bootmem_unlock();
540
541 return NULL;
542}
543
544int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
545{
546 struct cvmx_bootmem_named_block_desc *named_block_ptr;
547
548 if (cvmx_bootmem_desc->major_version != 3) {
549 cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
550 "%d.%d at addr: %p\n",
551 (int)cvmx_bootmem_desc->major_version,
552 (int)cvmx_bootmem_desc->minor_version,
553 cvmx_bootmem_desc);
554 return 0;
555 }
556#ifdef DEBUG
557 cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
558#endif
559
560 /*
561 * Take lock here, as name lookup/block free/name free need to
562 * be atomic.
563 */
564 cvmx_bootmem_lock();
565
566 named_block_ptr =
567 cvmx_bootmem_phy_named_block_find(name,
568 CVMX_BOOTMEM_FLAG_NO_LOCKING);
569 if (named_block_ptr) {
570#ifdef DEBUG
571 cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
572 "%s, base: 0x%llx, size: 0x%llx\n",
573 name,
574 (unsigned long long)named_block_ptr->base_addr,
575 (unsigned long long)named_block_ptr->size);
576#endif
577 __cvmx_bootmem_phy_free(named_block_ptr->base_addr,
578 named_block_ptr->size,
579 CVMX_BOOTMEM_FLAG_NO_LOCKING);
580 named_block_ptr->size = 0;
581 /* Set size to zero to indicate block not used. */
582 }
583
584 cvmx_bootmem_unlock();
585 return named_block_ptr != NULL; /* 0 on failure, 1 on success */
586}
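A minimal usage sketch for the phy-level allocator above (not part of the patch), assuming the caller wants a buffer placed low in physical memory so that a 32 bit application can also reach it. The 64 KB size and the [1 MB, 256 MB) range are arbitrary example values; cvmx_bootmem_phy_alloc() and CVMX_BOOTMEM_ALIGNMENT_SIZE are taken from this file and its header.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>

static int64_t example_low_mem_alloc(void)
{
	int64_t phys;

	/* First-fit search of the free list, restricted to [1 MB, 256 MB). */
	phys = cvmx_bootmem_phy_alloc(0x10000,		/* 64 KB request */
				      0x100000,		/* address_min */
				      0x10000000,	/* address_max */
				      CVMX_BOOTMEM_ALIGNMENT_SIZE,
				      0);		/* flags: take the lock */
	if (phys < 0)
		return -1;	/* nothing in range could satisfy the request */

	return phys;		/* 64 bit physical address of the block */
}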
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 000000000000..6abe56f1e097
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,734 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Implementation of the Level 2 Cache (L2C) control, measurement, and
30 * debugging facilities.
31 */
32
33#include <asm/octeon/cvmx.h>
34#include <asm/octeon/cvmx-l2c.h>
35#include <asm/octeon/cvmx-spinlock.h>
36
37/*
38 * This spinlock is used internally to ensure that only one core is
39 * performing certain L2 operations at a time.
40 *
41 * NOTE: This only protects calls from within a single application -
42 * if multiple applications or operating systems are running, then it
43 * is up to the user program to coordinate between them.
44 */
45static cvmx_spinlock_t cvmx_l2c_spinlock;
46
47static inline int l2_size_half(void)
48{
49 uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
50 return !!(val & (1ull << 34));
51}
52
53int cvmx_l2c_get_core_way_partition(uint32_t core)
54{
55 uint32_t field;
56
57 /* Validate the core number */
58 if (core >= cvmx_octeon_num_cores())
59 return -1;
60
61 /*
62 * Use the lower two bits of the core number to determine the
63 * bit offset of the UMSK[] field in the L2C_SPAR register.
64 */
65 field = (core & 0x3) * 8;
66
67 /*
68 * Return the UMSK[] field from the appropriate L2C_SPAR
69 * register based on the core number.
70 */
71
72 switch (core & 0xC) {
73 case 0x0:
74 return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
75 field;
76 case 0x4:
77 return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
78 field;
79 case 0x8:
80 return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
81 field;
82 case 0xC:
83 return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
84 field;
85 }
86 return 0;
87}
88
89int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
90{
91 uint32_t field;
92 uint32_t valid_mask;
93
94 valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
95
96 mask &= valid_mask;
97
98 /* A UMSK setting which blocks all L2C Ways is an error. */
99 if (mask == valid_mask)
100 return -1;
101
102 /* Validate the core number */
103 if (core >= cvmx_octeon_num_cores())
104 return -1;
105
106 /* Check to make sure current mask & new mask don't block all ways */
107 if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
108 valid_mask)
109 return -1;
110
111 /* Use the lower two bits of core to determine the bit offset of the
112 * UMSK[] field in the L2C_SPAR register.
113 */
114 field = (core & 0x3) * 8;
115
116 /* Assign the new mask setting to the UMSK[] field in the appropriate
117 * L2C_SPAR register based on the core_num.
118 *
119 */
120 switch (core & 0xC) {
121 case 0x0:
122 cvmx_write_csr(CVMX_L2C_SPAR0,
123 (cvmx_read_csr(CVMX_L2C_SPAR0) &
124 ~(0xFF << field)) | mask << field);
125 break;
126 case 0x4:
127 cvmx_write_csr(CVMX_L2C_SPAR1,
128 (cvmx_read_csr(CVMX_L2C_SPAR1) &
129 ~(0xFF << field)) | mask << field);
130 break;
131 case 0x8:
132 cvmx_write_csr(CVMX_L2C_SPAR2,
133 (cvmx_read_csr(CVMX_L2C_SPAR2) &
134 ~(0xFF << field)) | mask << field);
135 break;
136 case 0xC:
137 cvmx_write_csr(CVMX_L2C_SPAR3,
138 (cvmx_read_csr(CVMX_L2C_SPAR3) &
139 ~(0xFF << field)) | mask << field);
140 break;
141 }
142 return 0;
143}
144
145int cvmx_l2c_set_hw_way_partition(uint32_t mask)
146{
147 uint32_t valid_mask;
148
149 valid_mask = 0xff;
150
151 if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
152 if (l2_size_half())
153 valid_mask = 0xf;
154 } else if (l2_size_half())
155 valid_mask = 0x3;
156
157 mask &= valid_mask;
158
159 /* A UMSK setting which blocks all L2C Ways is an error. */
160 if (mask == valid_mask)
161 return -1;
162 /* Check to make sure current mask & new mask don't block all ways */
163 if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
164 valid_mask)
165 return -1;
166
167 cvmx_write_csr(CVMX_L2C_SPAR4,
168 (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
169 return 0;
170}
171
172int cvmx_l2c_get_hw_way_partition(void)
173{
174 return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
175}
176
177void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
178 uint32_t clear_on_read)
179{
180 union cvmx_l2c_pfctl pfctl;
181
182 pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
183
184 switch (counter) {
185 case 0:
186 pfctl.s.cnt0sel = event;
187 pfctl.s.cnt0ena = 1;
188 if (!cvmx_octeon_is_pass1())
189 pfctl.s.cnt0rdclr = clear_on_read;
190 break;
191 case 1:
192 pfctl.s.cnt1sel = event;
193 pfctl.s.cnt1ena = 1;
194 if (!cvmx_octeon_is_pass1())
195 pfctl.s.cnt1rdclr = clear_on_read;
196 break;
197 case 2:
198 pfctl.s.cnt2sel = event;
199 pfctl.s.cnt2ena = 1;
200 if (!cvmx_octeon_is_pass1())
201 pfctl.s.cnt2rdclr = clear_on_read;
202 break;
203 case 3:
204 default:
205 pfctl.s.cnt3sel = event;
206 pfctl.s.cnt3ena = 1;
207 if (!cvmx_octeon_is_pass1())
208 pfctl.s.cnt3rdclr = clear_on_read;
209 break;
210 }
211
212 cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
213}
214
215uint64_t cvmx_l2c_read_perf(uint32_t counter)
216{
217 switch (counter) {
218 case 0:
219 return cvmx_read_csr(CVMX_L2C_PFC0);
220 case 1:
221 return cvmx_read_csr(CVMX_L2C_PFC1);
222 case 2:
223 return cvmx_read_csr(CVMX_L2C_PFC2);
224 case 3:
225 default:
226 return cvmx_read_csr(CVMX_L2C_PFC3);
227 }
228}
229
230/**
231 * @INTERNAL
232 * Helper function used to fault in cache lines for L2 cache locking
233 *
234 * @addr: Address of base of memory region to read into L2 cache
235 * @len: Length (in bytes) of region to fault in
236 */
237static void fault_in(uint64_t addr, int len)
238{
239 volatile char *ptr;
240 volatile char dummy;
241 /*
242 * Adjust addr and length so we get all cache lines even for
243 * small ranges spanning two cache lines
244 */
245 len += addr & CVMX_CACHE_LINE_MASK;
246 addr &= ~CVMX_CACHE_LINE_MASK;
247 ptr = (volatile char *)cvmx_phys_to_ptr(addr);
248 /*
249 * Invalidate L1 cache to make sure all loads result in data
250 * being in L2.
251 */
252 CVMX_DCACHE_INVALIDATE;
253 while (len > 0) {
254 dummy += *ptr;
255 len -= CVMX_CACHE_LINE_SIZE;
256 ptr += CVMX_CACHE_LINE_SIZE;
257 }
258}
259
260int cvmx_l2c_lock_line(uint64_t addr)
261{
262 int retval = 0;
263 union cvmx_l2c_dbg l2cdbg;
264 union cvmx_l2c_lckbase lckbase;
265 union cvmx_l2c_lckoff lckoff;
266 union cvmx_l2t_err l2t_err;
267 l2cdbg.u64 = 0;
268 lckbase.u64 = 0;
269 lckoff.u64 = 0;
270
271 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
272
273 /* Clear l2t error bits if set */
274 l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
275 l2t_err.s.lckerr = 1;
276 l2t_err.s.lckerr2 = 1;
277 cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
278
279 addr &= ~CVMX_CACHE_LINE_MASK;
280
281 /* Set this core as debug core */
282 l2cdbg.s.ppnum = cvmx_get_core_num();
283 CVMX_SYNC;
284 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
285 cvmx_read_csr(CVMX_L2C_DBG);
286
287 lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
288 cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
289 cvmx_read_csr(CVMX_L2C_LCKOFF);
290
291 if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
292 int alias_shift =
293 CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
294 uint64_t addr_tmp =
295 addr ^ (addr & ((1 << alias_shift) - 1)) >>
296 CVMX_L2_SET_BITS;
297 lckbase.s.lck_base = addr_tmp >> 7;
298 } else {
299 lckbase.s.lck_base = addr >> 7;
300 }
301
302 lckbase.s.lck_ena = 1;
303 cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
304 cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
305
306 fault_in(addr, CVMX_CACHE_LINE_SIZE);
307
308 lckbase.s.lck_ena = 0;
309 cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
310 cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
311
312 /* Stop being debug core */
313 cvmx_write_csr(CVMX_L2C_DBG, 0);
314 cvmx_read_csr(CVMX_L2C_DBG);
315
316 l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
317 if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
318 retval = 1; /* We were unable to lock the line */
319
320 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
321
322 return retval;
323}
324
325int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
326{
327 int retval = 0;
328
329 /* Round start/end to cache line boundaries */
330 len += start & CVMX_CACHE_LINE_MASK;
331 start &= ~CVMX_CACHE_LINE_MASK;
332 len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
333
334 while (len) {
335 retval += cvmx_l2c_lock_line(start);
336 start += CVMX_CACHE_LINE_SIZE;
337 len -= CVMX_CACHE_LINE_SIZE;
338 }
339
340 return retval;
341}
342
343void cvmx_l2c_flush(void)
344{
345 uint64_t assoc, set;
346 uint64_t n_assoc, n_set;
347 union cvmx_l2c_dbg l2cdbg;
348
349 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
350
351 l2cdbg.u64 = 0;
352 if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
353 l2cdbg.s.ppnum = cvmx_get_core_num();
354 l2cdbg.s.finv = 1;
355 n_set = CVMX_L2_SETS;
356 n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
357 for (set = 0; set < n_set; set++) {
358 for (assoc = 0; assoc < n_assoc; assoc++) {
359 l2cdbg.s.set = assoc;
360 /* Enter debug mode, and make sure all other
361 ** writes complete before we enter debug
362 ** mode */
363 CVMX_SYNCW;
364 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
365 cvmx_read_csr(CVMX_L2C_DBG);
366
367 CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
368 (CVMX_MIPS_SPACE_XKPHYS,
369 set * CVMX_CACHE_LINE_SIZE), 0);
370 CVMX_SYNCW; /* Push STF out to L2 */
371 /* Exit debug mode */
372 CVMX_SYNC;
373 cvmx_write_csr(CVMX_L2C_DBG, 0);
374 cvmx_read_csr(CVMX_L2C_DBG);
375 }
376 }
377
378 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
379}
380
381int cvmx_l2c_unlock_line(uint64_t address)
382{
383 int assoc;
384 union cvmx_l2c_tag tag;
385 union cvmx_l2c_dbg l2cdbg;
386 uint32_t tag_addr;
387
388 uint32_t index = cvmx_l2c_address_to_index(address);
389
390 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
391 /* Compute portion of address that is stored in tag */
392 tag_addr =
393 ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
394 ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
395 for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
396 tag = cvmx_get_l2c_tag(assoc, index);
397
398 if (tag.s.V && (tag.s.addr == tag_addr)) {
399 l2cdbg.u64 = 0;
400 l2cdbg.s.ppnum = cvmx_get_core_num();
401 l2cdbg.s.set = assoc;
402 l2cdbg.s.finv = 1;
403
404 CVMX_SYNC;
405 /* Enter debug mode */
406 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
407 cvmx_read_csr(CVMX_L2C_DBG);
408
409 CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
410 (CVMX_MIPS_SPACE_XKPHYS,
411 address), 0);
412 CVMX_SYNC;
413 /* Exit debug mode */
414 cvmx_write_csr(CVMX_L2C_DBG, 0);
415 cvmx_read_csr(CVMX_L2C_DBG);
416 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
417 return tag.s.L;
418 }
419 }
420 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
421 return 0;
422}
423
424int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
425{
426 int num_unlocked = 0;
427 /* Round start/end to cache line boundaries */
428 len += start & CVMX_CACHE_LINE_MASK;
429 start &= ~CVMX_CACHE_LINE_MASK;
430 len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
431 while (len > 0) {
432 num_unlocked += cvmx_l2c_unlock_line(start);
433 start += CVMX_CACHE_LINE_SIZE;
434 len -= CVMX_CACHE_LINE_SIZE;
435 }
436
437 return num_unlocked;
438}
439
440/*
441 * Internal l2c tag types. These are converted to a generic structure
442 * that can be used on all chips.
443 */
444union __cvmx_l2c_tag {
445 uint64_t u64;
446 struct cvmx_l2c_tag_cn50xx {
447 uint64_t reserved:40;
448 uint64_t V:1; /* Line valid */
449 uint64_t D:1; /* Line dirty */
450 uint64_t L:1; /* Line locked */
451 uint64_t U:1; /* Use, LRU eviction */
452 uint64_t addr:20; /* Phys mem addr (33..14) */
453 } cn50xx;
454 struct cvmx_l2c_tag_cn30xx {
455 uint64_t reserved:41;
456 uint64_t V:1; /* Line valid */
457 uint64_t D:1; /* Line dirty */
458 uint64_t L:1; /* Line locked */
459 uint64_t U:1; /* Use, LRU eviction */
460 uint64_t addr:19; /* Phys mem addr (33..15) */
461 } cn30xx;
462 struct cvmx_l2c_tag_cn31xx {
463 uint64_t reserved:42;
464 uint64_t V:1; /* Line valid */
465 uint64_t D:1; /* Line dirty */
466 uint64_t L:1; /* Line locked */
467 uint64_t U:1; /* Use, LRU eviction */
468 uint64_t addr:18; /* Phys mem addr (33..16) */
469 } cn31xx;
470 struct cvmx_l2c_tag_cn38xx {
471 uint64_t reserved:43;
472 uint64_t V:1; /* Line valid */
473 uint64_t D:1; /* Line dirty */
474 uint64_t L:1; /* Line locked */
475 uint64_t U:1; /* Use, LRU eviction */
476 uint64_t addr:17; /* Phys mem addr (33..17) */
477 } cn38xx;
478 struct cvmx_l2c_tag_cn58xx {
479 uint64_t reserved:44;
480 uint64_t V:1; /* Line valid */
481 uint64_t D:1; /* Line dirty */
482 uint64_t L:1; /* Line locked */
483 uint64_t U:1; /* Use, LRU eviction */
484 uint64_t addr:16; /* Phys mem addr (33..18) */
485 } cn58xx;
486 struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
487 struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
488};
489
490/**
491 * @INTERNAL
492 * Function to read an L2C tag. This code makes the current core
493 * the 'debug core' for the L2. This code must only be executed by
494 * 1 core at a time.
495 *
496 * @assoc: Association (way) of the tag to dump
497 * @index: Index of the cacheline
498 *
499 * Returns The Octeon model specific tag structure. This is
500 * translated by a wrapper function to a generic form that is
501 * easier for applications to use.
502 */
503static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
504{
505
506 uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
507 uint64_t core = cvmx_get_core_num();
508 union __cvmx_l2c_tag tag_val;
509 uint64_t dbg_addr = CVMX_L2C_DBG;
510 unsigned long flags;
511
512 union cvmx_l2c_dbg debug_val;
513 debug_val.u64 = 0;
514 /*
515 * For low core count parts, the core number is always small enough
516 * to stay in the correct field and not set any reserved bits.
517 */
518 debug_val.s.ppnum = core;
519 debug_val.s.l2t = 1;
520 debug_val.s.set = assoc;
521 /*
522 * Make sure core is quiet (no prefetches, etc.) before
523 * entering debug mode.
524 */
525 CVMX_SYNC;
526 /* Flush L1 to make sure debug load misses L1 */
527 CVMX_DCACHE_INVALIDATE;
528
529 local_irq_save(flags);
530
531 /*
532 * The following must be done in assembly as when in debug
533 * mode all data loads from L2 return special debug data, not
534 * normal memory contents. Also, interrupts must be
535 * disabled, since if an interrupt occurs while in debug mode
536 * the ISR will get debug data from all its memory reads
537 * instead of the contents of memory
538 */
539
540 asm volatile (".set push \n"
541 " .set mips64 \n"
542 " .set noreorder \n"
543 /* Enter debug mode, wait for store */
544 " sd %[dbg_val], 0(%[dbg_addr]) \n"
545 " ld $0, 0(%[dbg_addr]) \n"
546 /* Read L2C tag data */
547 " ld %[tag_val], 0(%[tag_addr]) \n"
548 /* Exit debug mode, wait for store */
549 " sd $0, 0(%[dbg_addr]) \n"
550 " ld $0, 0(%[dbg_addr]) \n"
551 /* Invalidate dcache to discard debug data */
552 " cache 9, 0($0) \n"
553 " .set pop" :
554 [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
555 [dbg_val] "r"(debug_val.u64),
556 [tag_addr] "r"(debug_tag_addr) : "memory");
557
558 local_irq_restore(flags);
559 return tag_val;
560
561}
562
563union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
564{
565 union __cvmx_l2c_tag tmp_tag;
566 union cvmx_l2c_tag tag;
567 tag.u64 = 0;
568
569 if ((int)association >= cvmx_l2c_get_num_assoc()) {
570 cvmx_dprintf
571 ("ERROR: cvmx_get_l2c_tag association out of range\n");
572 return tag;
573 }
574 if ((int)index >= cvmx_l2c_get_num_sets()) {
575 cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
576 "index out of range (arg: %d, max: %d\n",
577 index, cvmx_l2c_get_num_sets());
578 return tag;
579 }
580 /* __read_l2_tag is intended for internal use only */
581 tmp_tag = __read_l2_tag(association, index);
582
583 /*
584 * Convert all tag structure types to generic version, as it
585 * can represent all models.
586 */
587 if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
588 tag.s.V = tmp_tag.cn58xx.V;
589 tag.s.D = tmp_tag.cn58xx.D;
590 tag.s.L = tmp_tag.cn58xx.L;
591 tag.s.U = tmp_tag.cn58xx.U;
592 tag.s.addr = tmp_tag.cn58xx.addr;
593 } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
594 tag.s.V = tmp_tag.cn38xx.V;
595 tag.s.D = tmp_tag.cn38xx.D;
596 tag.s.L = tmp_tag.cn38xx.L;
597 tag.s.U = tmp_tag.cn38xx.U;
598 tag.s.addr = tmp_tag.cn38xx.addr;
599 } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
600 || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
601 tag.s.V = tmp_tag.cn31xx.V;
602 tag.s.D = tmp_tag.cn31xx.D;
603 tag.s.L = tmp_tag.cn31xx.L;
604 tag.s.U = tmp_tag.cn31xx.U;
605 tag.s.addr = tmp_tag.cn31xx.addr;
606 } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
607 tag.s.V = tmp_tag.cn30xx.V;
608 tag.s.D = tmp_tag.cn30xx.D;
609 tag.s.L = tmp_tag.cn30xx.L;
610 tag.s.U = tmp_tag.cn30xx.U;
611 tag.s.addr = tmp_tag.cn30xx.addr;
612 } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
613 tag.s.V = tmp_tag.cn50xx.V;
614 tag.s.D = tmp_tag.cn50xx.D;
615 tag.s.L = tmp_tag.cn50xx.L;
616 tag.s.U = tmp_tag.cn50xx.U;
617 tag.s.addr = tmp_tag.cn50xx.addr;
618 } else {
619 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
620 }
621
622 return tag;
623}
624
625uint32_t cvmx_l2c_address_to_index(uint64_t addr)
626{
627 uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
628 union cvmx_l2c_cfg l2c_cfg;
629 l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
630
631 if (l2c_cfg.s.idxalias) {
632 idx ^=
633 ((addr & CVMX_L2C_ALIAS_MASK) >>
634 CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
635 }
636 idx &= CVMX_L2C_IDX_MASK;
637 return idx;
638}
639
640int cvmx_l2c_get_cache_size_bytes(void)
641{
642 return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
643 CVMX_CACHE_LINE_SIZE;
644}
645
646/**
647 * Return log base 2 of the number of sets in the L2 cache.
648 *
649 */
650int cvmx_l2c_get_set_bits(void)
651{
652 int l2_set_bits;
653 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
654 l2_set_bits = 11; /* 2048 sets */
655 else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
656 l2_set_bits = 10; /* 1024 sets */
657 else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
658 || OCTEON_IS_MODEL(OCTEON_CN52XX))
659 l2_set_bits = 9; /* 512 sets */
660 else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
661 l2_set_bits = 8; /* 256 sets */
662 else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
663 l2_set_bits = 7; /* 128 sets */
664 else {
665 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
666 l2_set_bits = 11; /* 2048 sets */
667 }
668 return l2_set_bits;
669
670}
671
672/* Return the number of sets in the L2 Cache */
673int cvmx_l2c_get_num_sets(void)
674{
675 return 1 << cvmx_l2c_get_set_bits();
676}
677
678/* Return the number of associations in the L2 Cache */
679int cvmx_l2c_get_num_assoc(void)
680{
681 int l2_assoc;
682 if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
683 OCTEON_IS_MODEL(OCTEON_CN52XX) ||
684 OCTEON_IS_MODEL(OCTEON_CN58XX) ||
685 OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
686 l2_assoc = 8;
687 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
688 OCTEON_IS_MODEL(OCTEON_CN30XX))
689 l2_assoc = 4;
690 else {
691 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
692 l2_assoc = 8;
693 }
694
695 /* Check to see if part of the cache is disabled */
696 if (cvmx_fuse_read(265))
697 l2_assoc = l2_assoc >> 2;
698 else if (cvmx_fuse_read(264))
699 l2_assoc = l2_assoc >> 1;
700
701 return l2_assoc;
702}
703
704/**
705 * Flush a line from the L2 cache
706 * This should only be called from one core at a time, as this routine
707 * sets the core to the 'debug' core in order to flush the line.
708 *
709 * @assoc: Association (or way) to flush
710 * @index: Index to flush
711 */
712void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
713{
714 union cvmx_l2c_dbg l2cdbg;
715
716 l2cdbg.u64 = 0;
717 l2cdbg.s.ppnum = cvmx_get_core_num();
718 l2cdbg.s.finv = 1;
719
720 l2cdbg.s.set = assoc;
721 /*
722 * Enter debug mode, and make sure all other writes complete
723 * before we enter debug mode.
724 */
725 asm volatile ("sync" : : : "memory");
726 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
727 cvmx_read_csr(CVMX_L2C_DBG);
728
729 CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
730 /* Exit debug mode */
731 asm volatile ("sync" : : : "memory");
732 cvmx_write_csr(CVMX_L2C_DBG, 0);
733 cvmx_read_csr(CVMX_L2C_DBG);
734}
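A hedged sketch (not part of the patch) of how the locking API above might be used to pin a small, latency-critical buffer in the L2 cache. The buffer and its length are hypothetical, and cvmx_ptr_to_phys() is assumed to be the executive's pointer-to-physical helper, the inverse of cvmx_phys_to_ptr() used elsewhere in this patch.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>

static int example_pin_buffer(void *buf, uint64_t len)
{
	/* cvmx_ptr_to_phys() is assumed here; it is not part of this patch. */
	uint64_t phys = cvmx_ptr_to_phys(buf);
	int failed;

	/* Locks whole cache lines; returns the number of lines NOT locked. */
	failed = cvmx_l2c_lock_mem_region(phys, len);
	if (failed) {
		/* Roll back any lines that did get locked. */
		cvmx_l2c_unlock_mem_region(phys, len);
		return -1;
	}
	return 0;
}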
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 000000000000..4812370706a1
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,116 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * This module provides system/board/application information obtained
30 * by the bootloader.
31 */
32
33#include <asm/octeon/cvmx.h>
34#include <asm/octeon/cvmx-spinlock.h>
35#include <asm/octeon/cvmx-sysinfo.h>
36
37/**
38 * This structure defines the private state maintained by the sysinfo module.
39 *
40 */
41static struct {
42 struct cvmx_sysinfo sysinfo; /* system information */
43 cvmx_spinlock_t lock; /* mutex spinlock */
44
45} state = {
46 .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
47};
48
49
50/*
51 * Global variables that define the min/max of the memory region set
52 * up for 32 bit userspace access.
53 */
54uint64_t linux_mem32_min;
55uint64_t linux_mem32_max;
56uint64_t linux_mem32_wired;
57uint64_t linux_mem32_offset;
58
59/**
60 * This function returns the application information as obtained
61 * by the bootloader. This provides the core mask of the cores
62 * running the same application image, as well as the physical
63 * memory regions available to the core.
64 *
65 * Returns Pointer to the boot information structure
66 *
67 */
68struct cvmx_sysinfo *cvmx_sysinfo_get(void)
69{
70 return &(state.sysinfo);
71}
72
73/**
74 * This function is used in non-simple executive environments (such as
75 * Linux kernel, u-boot, etc.) to configure the minimal fields that
76 * are required to use simple executive files directly.
77 *
78 * Locking (if required) must be handled outside of this
79 * function
80 *
81 * @phy_mem_desc_ptr:
82 * Pointer to global physical memory descriptor
83 * (bootmem descriptor)
84 * @board_type: Octeon board type enumeration
85 *
86 * @board_rev_major:
87 * Board major revision
88 * @board_rev_minor:
89 * Board minor revision
90 * @cpu_clock_hz:
91 * CPU clock frequency in hertz
92 *
93 * Returns 0: Failure
94 * 1: success
95 */
96int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
97 uint16_t board_type,
98 uint8_t board_rev_major,
99 uint8_t board_rev_minor,
100 uint32_t cpu_clock_hz)
101{
102
103 /* The sysinfo structure was already initialized */
104 if (state.sysinfo.board_type)
105 return 0;
106
107 memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
108 state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
109 state.sysinfo.board_type = board_type;
110 state.sysinfo.board_rev_major = board_rev_major;
111 state.sysinfo.board_rev_minor = board_rev_minor;
112 state.sysinfo.cpu_clock_hz = cpu_clock_hz;
113
114 return 1;
115}
116
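A hedged sketch (not part of the patch) of how early platform setup code might seed the sysinfo module defined above. The board type, revision and clock values are placeholders; only cvmx_sysinfo_minimal_initialize() and cvmx_sysinfo_get() are taken from this file.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-sysinfo.h>

static struct cvmx_sysinfo *example_sysinfo_setup(void *bootmem_desc)
{
	/* Returns 1 on success, 0 if the structure was already filled in. */
	if (!cvmx_sysinfo_minimal_initialize(bootmem_desc,
					     0x0800,	/* board_type (placeholder) */
					     1, 0,	/* board rev major/minor */
					     500000000))	/* cpu_clock_hz */
		return NULL;

	/* Every later cvmx_sysinfo_get() call sees the values set above. */
	return cvmx_sysinfo_get();
}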
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 000000000000..9afc3794ed1b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,358 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * File defining functions for working with different Octeon
30 * models.
31 */
32#include <asm/octeon/octeon.h>
33
34/**
35 * Given the chip processor ID from COP0, this function returns a
36 * string representing the chip model number. The string is of the
37 * form CNXXXXpX.X-FREQ-SUFFIX.
38 * - XXXX = The chip model number
39 * - X.X = Chip pass number
40 * - FREQ = Current frequency in MHz
41 * - SUFFIX = NSP, EXP, SCP, SSP, or CP
42 *
43 * @chip_id: Chip ID
44 *
45 * Returns Model string
46 */
47const char *octeon_model_get_string(uint32_t chip_id)
48{
49 static char buffer[32];
50 return octeon_model_get_string_buffer(chip_id, buffer);
51}
52
53/*
54 * Version of octeon_model_get_string() that takes a buffer as an argument,
55 * because static/global variables don't work when running early in
56 * u-boot from flash.
57 */
58const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
59{
60 const char *family;
61 const char *core_model;
62 char pass[4];
63 int clock_mhz;
64 const char *suffix;
65 union cvmx_l2d_fus3 fus3;
66 int num_cores;
67 union cvmx_mio_fus_dat2 fus_dat2;
68 union cvmx_mio_fus_dat3 fus_dat3;
69 char fuse_model[10];
70 uint32_t fuse_data = 0;
71
72 fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
73 fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
74 fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
75
76 num_cores = cvmx_octeon_num_cores();
77
78 /* Make sure the non-existent devices look disabled */
79 switch ((chip_id >> 8) & 0xff) {
80 case 6: /* CN50XX */
81 case 2: /* CN30XX */
82 fus_dat3.s.nodfa_dte = 1;
83 fus_dat3.s.nozip = 1;
84 break;
85 case 4: /* CN57XX or CN56XX */
86 fus_dat3.s.nodfa_dte = 1;
87 break;
88 default:
89 break;
90 }
91
92 /* Make a guess at the suffix */
93 /* NSP = everything */
94 /* EXP = No crypto */
95 /* SCP = No DFA, No zip */
96 /* CP = No DFA, No crypto, No zip */
97 if (fus_dat3.s.nodfa_dte) {
98 if (fus_dat2.s.nocrypto)
99 suffix = "CP";
100 else
101 suffix = "SCP";
102 } else if (fus_dat2.s.nocrypto)
103 suffix = "EXP";
104 else
105 suffix = "NSP";
106
107 /*
108 * Assume pass number is encoded using <5:3><2:0>. Exceptions
109 * will be fixed later.
110 */
111 sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
112
113 /*
114 * Use the number of cores to determine the last 2 digits of
115 * the model number. There are some exceptions that are fixed
116 * later.
117 */
118 switch (num_cores) {
119 case 16:
120 core_model = "60";
121 break;
122 case 15:
123 core_model = "58";
124 break;
125 case 14:
126 core_model = "55";
127 break;
128 case 13:
129 core_model = "52";
130 break;
131 case 12:
132 core_model = "50";
133 break;
134 case 11:
135 core_model = "48";
136 break;
137 case 10:
138 core_model = "45";
139 break;
140 case 9:
141 core_model = "42";
142 break;
143 case 8:
144 core_model = "40";
145 break;
146 case 7:
147 core_model = "38";
148 break;
149 case 6:
150 core_model = "34";
151 break;
152 case 5:
153 core_model = "32";
154 break;
155 case 4:
156 core_model = "30";
157 break;
158 case 3:
159 core_model = "25";
160 break;
161 case 2:
162 core_model = "20";
163 break;
164 case 1:
165 core_model = "10";
166 break;
167 default:
168 core_model = "XX";
169 break;
170 }
171
172 /* Now figure out the family, the first two digits */
173 switch ((chip_id >> 8) & 0xff) {
174 case 0: /* CN38XX, CN37XX or CN36XX */
175 if (fus3.cn38xx.crip_512k) {
176 /*
177 * For some unknown reason, the 16 core one is
178 * called 37 instead of 36.
179 */
180 if (num_cores >= 16)
181 family = "37";
182 else
183 family = "36";
184 } else
185 family = "38";
186 /*
187 * This series of chips didn't follow the standard
188 * pass numbering.
189 */
190 switch (chip_id & 0xf) {
191 case 0:
192 strcpy(pass, "1.X");
193 break;
194 case 1:
195 strcpy(pass, "2.X");
196 break;
197 case 3:
198 strcpy(pass, "3.X");
199 break;
200 default:
201 strcpy(pass, "X.X");
202 break;
203 }
204 break;
205 case 1: /* CN31XX or CN3020 */
206 if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
207 family = "30";
208 else
209 family = "31";
210 /*
211 * This series of chips didn't follow the standard
212 * pass numbering.
213 */
214 switch (chip_id & 0xf) {
215 case 0:
216 strcpy(pass, "1.0");
217 break;
218 case 2:
219 strcpy(pass, "1.1");
220 break;
221 default:
222 strcpy(pass, "X.X");
223 break;
224 }
225 break;
226 case 2: /* CN3010 or CN3005 */
227 family = "30";
228 /* A chip with half cache is an 05 */
229 if (fus3.cn30xx.crip_64k)
230 core_model = "05";
231 /*
232 * This series of chips didn't follow the standard
233 * pass numbering.
234 */
235 switch (chip_id & 0xf) {
236 case 0:
237 strcpy(pass, "1.0");
238 break;
239 case 2:
240 strcpy(pass, "1.1");
241 break;
242 default:
243 strcpy(pass, "X.X");
244 break;
245 }
246 break;
247 case 3: /* CN58XX */
248 family = "58";
249 /* Special case. 4 core, no crypto */
250 if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto)
251 core_model = "29";
252
253 /* Pass 1 uses different encodings for pass numbers */
254 if ((chip_id & 0xFF) < 0x8) {
255 switch (chip_id & 0x3) {
256 case 0:
257 strcpy(pass, "1.0");
258 break;
259 case 1:
260 strcpy(pass, "1.1");
261 break;
262 case 3:
263 strcpy(pass, "1.2");
264 break;
265 default:
266 strcpy(pass, "1.X");
267 break;
268 }
269 }
270 break;
271 case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
272 if (fus_dat2.cn56xx.raid_en) {
273 if (fus3.cn56xx.crip_1024k)
274 family = "55";
275 else
276 family = "57";
277 if (fus_dat2.cn56xx.nocrypto)
278 suffix = "SP";
279 else
280 suffix = "SSP";
281 } else {
282 if (fus_dat2.cn56xx.nocrypto)
283 suffix = "CP";
284 else {
285 suffix = "NSP";
286 if (fus_dat3.s.nozip)
287 suffix = "SCP";
288 }
289 if (fus3.cn56xx.crip_1024k)
290 family = "54";
291 else
292 family = "56";
293 }
294 break;
295 case 6: /* CN50XX */
296 family = "50";
297 break;
298 case 7: /* CN52XX */
299 if (fus3.cn52xx.crip_256k)
300 family = "51";
301 else
302 family = "52";
303 break;
304 default:
305 family = "XX";
306 core_model = "XX";
307 strcpy(pass, "X.X");
308 suffix = "XXX";
309 break;
310 }
311
312 clock_mhz = octeon_get_clock_rate() / 1000000;
313
314 if (family[0] != '3') {
315 /* Check for model in fuses, overrides normal decode */
316 /* This is _not_ valid for Octeon CN3XXX models */
317 fuse_data |= cvmx_fuse_read_byte(51);
318 fuse_data = fuse_data << 8;
319 fuse_data |= cvmx_fuse_read_byte(50);
320 fuse_data = fuse_data << 8;
321 fuse_data |= cvmx_fuse_read_byte(49);
322 fuse_data = fuse_data << 8;
323 fuse_data |= cvmx_fuse_read_byte(48);
324 if (fuse_data & 0x7ffff) {
325 int model = fuse_data & 0x3fff;
326 int suffix = (fuse_data >> 14) & 0x1f;
327 if (suffix && model) {
328 /*
329 * Have both the model number and the suffix in
330 * the fuses, so use both.
331 */
332 sprintf(fuse_model, "%d%c",
333 model, 'A' + suffix - 1);
334 core_model = "";
335 family = fuse_model;
336 } else if (suffix && !model) {
337 /*
338 * Only have suffix, so add suffix to
339 * 'normal' model number.
340 */
341 sprintf(fuse_model, "%s%c", core_model,
342 'A' + suffix - 1);
343 core_model = fuse_model;
344 } else {
345 /*
346 * Don't have suffix, so just use
347 * model from fuses.
348 */
349 sprintf(fuse_model, "%d", model);
350 core_model = "";
351 family = fuse_model;
352 }
353 }
354 }
355 sprintf(buffer, "CN%s%sp%s-%d-%s",
356 family, core_model, pass, clock_mhz, suffix);
357 return buffer;
358}
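Finally, a hedged sketch (not part of the patch) of printing the decoded model string at boot. read_c0_prid() is assumed to be the usual Linux/MIPS accessor for the CP0 PRId register, available via <asm/mipsregs.h>; the chip_id argument expected by octeon_model_get_string() is that processor ID value, and the example output string is only indicative.

#include <linux/kernel.h>
#include <asm/mipsregs.h>
#include <asm/octeon/octeon.h>

static void example_print_model(void)
{
	/* CP0 PRId identifies the OCTEON model, family and pass. */
	uint32_t chip_id = read_c0_prid();

	/* e.g. "CN5860p2.0-750-NSP", depending on chip, fuses and clock. */
	pr_info("Booting on OCTEON %s\n", octeon_model_get_string(chip_id));
}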