Diffstat (limited to 'arch/mips/mm/c-r4k.c')
-rw-r--r--	arch/mips/mm/c-r4k.c	1260
1 file changed, 1260 insertions, 0 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
new file mode 100644
index 000000000000..a03ebb2cba67
--- /dev/null
+++ b/arch/mips/mm/c-r4k.c
@@ -0,0 +1,1260 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

static unsigned long icache_size, dcache_size, scache_size;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void no_sc_noop(void) {}

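/*
 * bcops defaults to the no-ops below; platform setup code for systems
 * with a board-level (external) cache is expected to replace it so that
 * the bc_wback_inv() / bc_inv() calls in the DMA routines reach the real
 * controller.
 */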
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x2010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x2020)

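/*
 * The R4600 hit-cacheop workaround: the macro below performs a dummy
 * uncached load (any CKSEG1 address works) right before a Hit_* cache
 * operation on V2.x parts, and pads with nops on V1.x parts, which
 * appear to dislike back-to-back hit cacheops.
 */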
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

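/*
 * The cache "blast" routines are selected once at boot: each *_setup()
 * helper below, called from ld_mmu_r4xx0(), probes the line size and
 * points the corresponding function pointer at a routine unrolled for
 * that geometry.
 */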
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
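/*
 * cache32_unroll32() emits 32 cache ops on 32-byte lines, touching
 * exactly 1kB of icache per call. Aligning to 1kB (order 10) keeps each
 * unrolled loop within a single 1kB "chunk", and the initial 2kB
 * alignment (order 11) starts the code in an even chunk, so a loop that
 * only indexes away odd chunks can never invalidate itself.
 */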

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* We're executing from an even chunk; blast the odd chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* We're executing from an odd chunk; blast the even chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* We're executing from an even chunk; blast the odd chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* We're executing from an odd chunk; blast the even chunks. */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is the former mm flush_cache_all(), which really should be
 * flush_cache_vunmap() these days ...
 */
static inline void local_r4k_flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

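	/*
	 * CPUs whose secondary cache enforces the subset property
	 * (R4000SC/MC, R4400SC/MC, R10000, R12000) need the S-cache
	 * blasted as well for a truly complete flush.
	 */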
	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long page;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long page = fcp_args->page;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation. So we do indexed flushes
	 * in that case, which doesn't flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(page);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(page);
		}
		if (exec)
			r4k_blast_icache_page(page);

		return;
	}

	/*
	 * Do an indexed flush; it's too much work to get the (possible)
	 * TLB refills to work correctly.
	 */
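	/*
	 * Worked example (hypothetical sizes): with a 16kB direct-mapped
	 * dcache, dcache_size - 1 == 0x3fff, so page & (dcache_size - 1)
	 * keeps only the index bits; adding INDEX_BASE yields an unmapped
	 * address usable by Index_* cacheops without a TLB lookup, hitting
	 * exactly the lines the page's data could occupy.
	 */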
	page = INDEX_BASE + (page & (dcache_size - 1));
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(page);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(page);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, vma->vm_mm) != 0)
				drop_mmu_context(vma->vm_mm, cpu);
		} else
			r4k_blast_icache_page_indexed(page);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn)
{
	struct flush_cache_page_args args;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long sc_lsize = current_cpu_data.scache.linesz;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;
	unsigned long addr, aend;

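	/*
	 * Order matters: dirty data is written back out of the D-cache
	 * (and, if the I-cache doesn't snoop remote stores, out of the
	 * S-cache) before the I-cache is invalidated, so the subsequent
	 * I-cache refills fetch the new instructions.
	 */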
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			addr = start & ~(dc_lsize - 1);
			aend = (end - 1) & ~(dc_lsize - 1);

			while (1) {
				/* Hit_Writeback_Inv_D */
				protected_writeback_dcache_line(addr);
				if (addr == aend)
					break;
				addr += dc_lsize;
			}
		}

		if (!cpu_icache_snoops_remote_store) {
			if (end - start > scache_size) {
				r4k_blast_scache();
			} else {
				addr = start & ~(sc_lsize - 1);
				aend = (end - 1) & ~(sc_lsize - 1);

				while (1) {
					/* Hit_Writeback_Inv_SD */
					protected_writeback_scache_line(addr);
					if (addr == aend)
						break;
					addr += sc_lsize;
				}
			}
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		addr = start & ~(ic_lsize - 1);
		aend = (end - 1) & ~(ic_lsize - 1);
		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += ic_lsize;
		}
	}
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
}

/*
 * OK, this seriously sucks. We use these to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache,
 * which is significantly more expensive than the real thing. On the
 * other hand we at least know the kernel address of the page so we can
 * flush it selectively.
 */

struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ... Because we don't know the virtual address, we've got
	 * the choice of either invalidating the entire primary and
	 * secondary caches or invalidating only the secondary cache. With
	 * the subset enforcement on R4000SC, R4400SC, R10000 and R12000,
	 * invalidating the secondary cache will result in any entries in
	 * the primary caches also getting invalidated, which hopefully is
	 * a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}


#ifdef CONFIG_DMA_NONCOHERENT

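/*
 * On DMA-noncoherent systems the caches don't observe device accesses:
 * a buffer must be written back before the device reads it and
 * invalidated before the CPU reads data the device has written. The
 * bc_wback_inv() / bc_inv() calls at the end maintain any external
 * board cache via bcops as well.
 */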
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = current_cpu_data.scache.linesz;

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	/*
	 * Either there is no secondary cache or the available caches don't
	 * have the subset property, so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = current_cpu_data.scache.linesz;

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses here, we don't
 * care very much about what happens in that case. Usually a segmentation
 * fault will dump the process later on anyway ...
 */
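/*
 * Flush the lines covering a signal trampoline the kernel has just
 * written (typically on the user stack): write back the D-cache line
 * (and, where the I-cache doesn't snoop remote stores, the S-cache
 * line), then invalidate the matching I-cache line.
 */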
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long sc_lsize = current_cpu_data.scache.linesz;
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_MIPS32
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_MIPS64
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

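	/*
	 * The loop below strides by 0x1000, which presumably matches the
	 * 4kB way size of the RM7000's 4-way icache, so each iteration
	 * touches one index in all four ways: store a zero (invalid) tag,
	 * Fill the line, then store the zero tag again.
	 */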
	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set\tmips0\n\t"
			".set\treorder\n\t"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

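	/*
	 * Worked example of the R4000-style config register decode used
	 * below: a CONF_IC field value of 2 gives
	 * icache_size = 1 << (12 + 2) = 16kB, and the one-bit CONF_IB
	 * field selects a 16- or 32-byte line (16 << IB). The VR41xx
	 * cases use a 1kB base (10 + field) instead.
	 */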
	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* doesn't matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* doesn't matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * Seems to be a MIPS32 or MIPS64 CPU; let's probe the
		 * I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;

		if (config & 0x8)	/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5. With page sizes larger than 32kB there is no possibility
	 * of getting a VCE exception anymore, so we don't care about this
	 * misconfiguration. The case is rather theoretical anyway;
	 * presumably no vendor is shipping hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/*
	 * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
	 * 2-way virtually indexed, so normally they'd suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1500:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly. You have
 * been warned.
 */
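/*
 * How the sizing works: each power-of-two offset within a 4MB window is
 * loaded from so its line carries a valid tag, then a zero (invalid) tag
 * is stored at offset 0. Index_Load_Tag_SD then reads tags back at
 * growing power-of-two offsets; the first offset that wraps around onto
 * index 0 returns the zero tag, and that offset equals the S-cache size
 * (the probed S-cache is treated as direct mapped, ways = 1).
 */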
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this. Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;	/* does not matter */

	return 1;
}

typedef int (*probe_func_t)(unsigned long);
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	probe_func_t probe_scache_kseg1;
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors. Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
		sc_present = probe_scache_kseg1(config);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;

	if ((c->isa_level == MIPS_CPU_ISA_M32 ||
	     c->isa_level == MIPS_CPU_ISA_M64) &&
	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used. Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	}
}

void __init ld_mmu_r4xx0(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	memcpy((void *)(CAC_BASE + 0x100), &except_vec2_generic, 0x80);
	memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
		c->dcache.flags |= MIPS_CACHE_ALIASES;

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
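	/*
	 * An alias arises when two virtual mappings of the same physical
	 * page differ in the cache index bits. Aligning shared mappings
	 * to the span of one way (sets * linesz) makes the index bits
	 * agree, hence the mask below, which is never less than a page.
	 */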
	shm_align_mask = max_t(unsigned long,
			       c->dcache.sets * c->dcache.linesz - 1,
			       PAGE_SIZE - 1);

	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	__flush_cache_all();
	coherency_setup();

	build_clear_page();
	build_copy_page();
}