author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/mm/c-sb1.c
tags      | Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/mips/mm/c-sb1.c')
-rw-r--r-- | arch/mips/mm/c-sb1.c | 558
1 file changed, 558 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
new file mode 100644
index 000000000000..ab30afd63b32
--- /dev/null
+++ b/arch/mips/mm/c-sb1.c
@@ -0,0 +1,558 @@
/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */

#define cache_set_op(op, addr)                                  \
        __asm__ __volatile__(                                   \
        "       .set    noreorder               \n"             \
        "       .set    mips64                  \n"             \
        "       cache   %0, (0<<13)(%1)         \n"             \
        "       cache   %0, (1<<13)(%1)         \n"             \
        "       cache   %0, (2<<13)(%1)         \n"             \
        "       cache   %0, (3<<13)(%1)         \n"             \
        "       .set    mips0                   \n"             \
        "       .set    reorder"                                \
        :                                                       \
        : "i" (op), "r" (addr))
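/*
 * Each "cache" line in the macro above hits one way of the set-
 * associative cache: for index ops the CACHE instruction treats the
 * low address bits as a (way, index) pair, and with the SB1's 32KB,
 * 4-way L1 caches each way spans 8KB, putting the way select at bit
 * 13.  The (0..3)<<13 offsets therefore apply the op to the given
 * index in all four ways at once.  (The 8KB-per-way figure is an
 * assumption from the SB1's documented cache geometry; the actual
 * sizes are probed from config1 below.)
 */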

#define sync()                                                  \
        __asm__ __volatile__(                                   \
        "       .set    mips64                  \n"             \
        "       sync                            \n"             \
        "       .set    mips0")

#define mispredict()                                            \
        __asm__ __volatile__(                                   \
        "       bnezl   $0, 1f                  \n" /* Force mispredict */ \
        "1:                                     \n");

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
        unsigned long addr = 0;

        while (addr < dcache_line_size * dcache_sets) {
                cache_set_op(Index_Writeback_Inv_D, addr);
                addr += dcache_line_size;
        }
}
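/*
 * Note that the loop above only walks dcache_sets * dcache_line_size
 * bytes -- one way's worth of indices -- since cache_set_op() already
 * covers all four ways per index.  Assuming the probed values work
 * out to 256 sets of 32-byte lines, that is an 8KB walk that cleans
 * the whole 32KB cache.
 */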

/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
        unsigned long end)
{
        unsigned long index;

        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                index = start & dcache_index_mask;
                cache_set_op(Index_Writeback_Inv_D, index);
                cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
                start += dcache_line_size;
        }
        sync();
}
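/*
 * Worked example of the bit-12 handling above, assuming 4KB pages and
 * the 8KB-per-way index range: the dcache is physically indexed, but
 * we only have a virtual address here, and virtual bit 12 need not
 * match physical bit 12.  So for, say, a VA whose index bits give
 *
 *      index = VA & dcache_index_mask = 0x1040
 *
 * the data may actually live at index 0x1040 or at 0x1040 ^ (1<<12) =
 * 0x0040.  Flushing both candidate indices covers either case at the
 * cost of one extra cache op per line.
 */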

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
        unsigned long end)
{
        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
                start += dcache_line_size;
        }
        sync();
}


/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
        unsigned long addr = 0;

        while (addr < icache_line_size * icache_sets) {
                cache_set_op(Index_Invalidate_I, addr);
                addr += icache_line_size;
        }
}

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

        /*
         * Bumping the ASID is probably cheaper than the flush ...
         */
        if (cpu_context(cpu, vma->vm_mm) != 0)
                drop_mmu_context(vma->vm_mm, cpu);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
        struct flush_cache_page_args *args = info;

        local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        addr &= PAGE_MASK;
        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;
        on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
        __attribute__((alias("local_sb1_flush_cache_page")));
#endif
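/*
 * The SMP/UP split above is a pattern that repeats throughout this
 * file: on SMP, the exported function packs its arguments into a
 * stack struct and runs the local_ variant on every CPU via
 * on_each_cpu(), since the L1 caches are per-CPU and each core must
 * flush its own; on UP, the exported symbol is simply an alias of the
 * local variant.  Passing &args (a stack address) is safe because the
 * final argument to this 2.6-era four-argument on_each_cpu() is
 * wait=1, so the call does not return until every CPU has finished.
 */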

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        start &= ~(icache_line_size - 1);
        end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Invalidate_I, start & icache_index_mask);
                start += icache_line_size;
        }
        mispredict();
        sync();
}
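/*
 * Why mispredict() after the invalidation loop: "bnezl $0, 1f" is a
 * branch-likely that can never be taken ($0 is always zero) but is
 * predicted taken, so it forces a branch misprediction.  The intent
 * appears to be to discard any instructions already speculatively
 * fetched through the stale icache lines before execution continues;
 * this reading is inferred from the "Force mispredict" comment rather
 * than stated in the original.
 */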


/*
 * Invalidate all caches on this CPU
 */
static void local_sb1___flush_cache_all(void)
{
        __sb1_writeback_inv_dcache_all();
        __sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
        __attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
        on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
        __attribute__((alias("local_sb1___flush_cache_all")));
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */

static void local_sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        /* Just wb-inv the whole dcache if the range is big enough */
        if ((end - start) > dcache_range_cutoff)
                __sb1_writeback_inv_dcache_all();
        else
                __sb1_writeback_inv_dcache_range(start, end);

        /* Just flush the whole icache if the range is big enough */
        if ((end - start) > icache_range_cutoff)
                __sb1_flush_icache_all();
        else
                __sb1_flush_icache_range(start, end);
}

#ifdef CONFIG_SMP
struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
        struct flush_icache_range_args *args = info;

        local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;
        on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
        __attribute__((alias("local_sb1_flush_icache_range")));
#endif

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
        unsigned long start;
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        /* Need to writeback any dirty data for that page, we have the PA */
        start = (unsigned long)(page - mem_map) << PAGE_SHIFT;
        __sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);
        /*
         * If there's a context, bump the ASID (cheaper than a flush,
         * since we don't know VAs!)
         */
        if (cpu_context(cpu, vma->vm_mm) != 0) {
                drop_mmu_context(vma->vm_mm, cpu);
        }
}

#ifdef CONFIG_SMP
struct flush_icache_page_args {
        struct vm_area_struct *vma;
        struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
        struct flush_icache_page_args *args = info;
        local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
        struct page *page)
{
        struct flush_icache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;
        args.vma = vma;
        args.page = page;
        on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
        __attribute__((alias("local_sb1_flush_icache_page")));
#endif

/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
        cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
        cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
        cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
        mispredict();
}
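/*
 * Since the trampoline fits in a single cacheline, one dcache
 * writeback-invalidate per bit-12 alias (the same aliasing handled in
 * __sb1_writeback_inv_dcache_range() above) plus a single icache
 * index invalidate is sufficient -- no range loop is needed.
 */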

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
        unsigned long iaddr = (unsigned long) info;
        local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
        on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
        __attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif


/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to.
 */
static void sb1_nop(void)
{
}

/*
 * Cache set values (from the mips64 spec):
 *      0 - 64
 *      1 - 128
 *      2 - 256
 *      3 - 512
 *      4 - 1024
 *      5 - 2048
 *      6 - 4096
 *      7 - Reserved
 */

static unsigned int decode_cache_sets(unsigned int config_field)
{
        if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return (1 << (config_field + 6));
}

/*
 * Cache line size values (from the mips64 spec):
 *      0 - No cache present
 *      1 - 4 bytes
 *      2 - 8 bytes
 *      3 - 16 bytes
 *      4 - 32 bytes
 *      5 - 64 bytes
 *      6 - 128 bytes
 *      7 - Reserved
 */

static unsigned int decode_cache_line_size(unsigned int config_field)
{
        if (config_field == 0) {
                return 0;
        } else if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return (1 << (config_field + 1));
}
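/*
 * Worked example for the two decoders, using field values consistent
 * with the SB1's 32KB, 4-way L1 caches (an assumption; the real
 * fields are read from config1 at boot): a sets field of 2 gives
 * decode_cache_sets(2) == 1 << (2 + 6) == 256 sets per way, and a
 * line size field of 4 gives decode_cache_line_size(4) ==
 * 1 << (4 + 1) == 32 bytes.  256 sets * 32 bytes * 4 ways is the
 * expected 32KB.
 */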

/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs):
 *
 *      24:22   Icache sets per way
 *      21:19   Icache line size
 *      18:16   Icache associativity
 *      15:13   Dcache sets per way
 *      12:10   Dcache line size
 *        9:7   Dcache associativity
 */

static char *way_string[] = {
        "direct mapped", "2-way", "3-way", "4-way",
        "5-way", "6-way", "7-way", "8-way",
};

static __init void probe_cache_sizes(void)
{
        u32 config1;

        config1 = read_c0_config1();
        icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
        dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
        icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
        dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
        icache_assoc = ((config1 >> 16) & 0x7) + 1;
        dcache_assoc = ((config1 >> 7) & 0x7) + 1;
        icache_size = icache_line_size * icache_sets * icache_assoc;
        dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
        /* Need to remove non-index bits for index ops */
        icache_index_mask = (icache_sets - 1) * icache_line_size;
        dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
        /*
         * These are for choosing range (index ops) versus all.
         * icache flushes all ways for each set, so drop icache_assoc.
         * dcache flushes all ways and each setting of bit 12 for each
         * index, so drop dcache_assoc and halve the dcache_sets.
         */
        icache_range_cutoff = icache_sets * icache_line_size;
        dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;

        printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
               icache_size >> 10, way_string[icache_assoc - 1],
               icache_line_size);
        printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
               dcache_size >> 10, way_string[dcache_assoc - 1],
               dcache_line_size);
}
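/*
 * Worked numbers, again assuming the SB1's 32KB, 4-way, 32-byte-line
 * L1 caches: 256 sets per way, index masks of 255 * 32 == 0x1fe0, an
 * icache_range_cutoff of 256 * 32 == 8KB and a dcache_range_cutoff of
 * 128 * 32 == 4KB.  Past the cutoff, a ranged flush would issue more
 * index ops than flushing the entire cache, which is why
 * local_sb1_flush_icache_range() compares range sizes against these
 * values.
 */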

/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs.
 */
void ld_mmu_sb1(void)
{
        extern char except_vec2_sb1;
        extern char handle_vec2_sb1;

        /* Special cache error handler for SB1 */
        memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80);
        memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80);
        memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);

        probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
        sb1_dma_init();
#endif

        /*
         * None of these are needed for the SB1 - the Dcache is
         * physically indexed and tagged, so no virtual aliasing can
         * occur.
         */
        flush_cache_range = (void *) sb1_nop;
        flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
        flush_cache_all = sb1_nop;

        /* These routines are for Icache coherence with the Dcache */
        flush_icache_range = sb1_flush_icache_range;
        flush_icache_page = sb1_flush_icache_page;
        flush_icache_all = __sb1_flush_icache_all; /* local only */

        /* This implies an Icache flush too, so can't be nop'ed */
        flush_cache_page = sb1_flush_cache_page;

        flush_cache_sigtramp = sb1_flush_cache_sigtramp;
        flush_data_cache_page = (void *) sb1_nop;

        /* Full flush */
        __flush_cache_all = sb1___flush_cache_all;

        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * This is the only way to force the update of K0 to complete
         * before subsequent instruction fetch.
         */
        __asm__ __volatile__(
                ".set   push                    \n"
        "       .set    noat                    \n"
        "       .set    noreorder               \n"
        "       .set    mips3                   \n"
        "       " STR(PTR_LA) " $1, 1f          \n"
        "       " STR(MTC0) "   $1, $14         \n"
        "       eret                            \n"
        "1:     .set    pop"
        :
        :
        : "memory");
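        /*
         * As far as can be inferred from the code, the sequence above
         * loads the address of label 1 into EPC (CP0 register 14) and
         * executes an eret, so control reaches "1:" via an exception
         * return.  On MIPS, eret clears instruction hazards and
         * redirects fetch non-speculatively, so everything after the
         * label is fetched under the new K0 cacheability attributes
         * set by change_c0_config() above.
         */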

        flush_cache_all();
}