Diffstat (limited to 'include/asm-mips/r4kcache.h')
-rw-r--r--  include/asm-mips/r4kcache.h | 475
1 file changed, 92 insertions, 383 deletions
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index a5ea9d828aee..0bcb79a58ee9 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -14,6 +14,7 @@
 
 #include <asm/asm.h>
 #include <asm/cacheops.h>
+#include <asm/cpu-features.h>
 
 /*
  * This macro return a properly sign-extended address suitable as base address
@@ -78,22 +79,25 @@ static inline void flush_scache_line(unsigned long addr)
         cache_op(Hit_Writeback_Inv_SD, addr);
 }
 
+#define protected_cache_op(op,addr) \
+        __asm__ __volatile__( \
+        "       .set push                       \n" \
+        "       .set noreorder                  \n" \
+        "       .set mips3                      \n" \
+        "1:     cache %0, (%1)                  \n" \
+        "2:     .set pop                        \n" \
+        "       .section __ex_table,\"a\"       \n" \
+        "       "STR(PTR)" 1b, 2b               \n" \
+        "       .previous" \
+        : \
+        : "i" (op), "r" (addr))
+
 /*
  * The next two are for badland addresses like signal trampolines.
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
-        __asm__ __volatile__(
-        "       .set push                       \n"
-        "       .set noreorder                  \n"
-        "       .set mips3                      \n"
-        "1:     cache %0, (%1)                  \n"
-        "2:     .set pop                        \n"
-        "       .section __ex_table,\"a\"       \n"
-        "       "STR(PTR)" 1b, 2b               \n"
-        "       .previous"
-        :
-        : "i" (Hit_Invalidate_I), "r" (addr));
+        protected_cache_op(Hit_Invalidate_I, addr);
 }
 
 /*
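The 1:/2: labels and the __ex_table entry are what make the operation "protected": if the CACHE instruction at label 1 faults on an unmapped user address, the exception fixup resumes at label 2 and the line is simply skipped. With the macro in place, further protected helpers reduce to one line each; a minimal sketch (the Hit_Invalidate_D variant below is illustrative only and not part of this patch):

        /* Hypothetical additional helper built on protected_cache_op(). */
        static inline void protected_invalidate_dcache_line(unsigned long addr)
        {
                protected_cache_op(Hit_Invalidate_D, addr);
        }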
@@ -104,32 +108,12 @@ static inline void protected_flush_icache_line(unsigned long addr)
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
-        __asm__ __volatile__(
-        "       .set push                       \n"
-        "       .set noreorder                  \n"
-        "       .set mips3                      \n"
-        "1:     cache %0, (%1)                  \n"
-        "2:     .set pop                        \n"
-        "       .section __ex_table,\"a\"       \n"
-        "       "STR(PTR)" 1b, 2b               \n"
-        "       .previous"
-        :
-        : "i" (Hit_Writeback_Inv_D), "r" (addr));
+        protected_cache_op(Hit_Writeback_Inv_D, addr);
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
 {
-        __asm__ __volatile__(
-        "       .set push                       \n"
-        "       .set noreorder                  \n"
-        "       .set mips3                      \n"
-        "1:     cache %0, (%1)                  \n"
-        "2:     .set pop                        \n"
-        "       .section __ex_table,\"a\"       \n"
-        "       "STR(PTR)" 1b, 2b               \n"
-        "       .previous"
-        :
-        : "i" (Hit_Writeback_Inv_SD), "r" (addr));
+        protected_cache_op(Hit_Writeback_Inv_SD, addr);
 }
 
 /*
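These protected helpers exist for user addresses that may not be mapped, the "badland addresses like signal trampolines" of the comment above. A typical caller rounds the address down to a cache-line boundary and touches the D-cache before the I-cache. The sketch below is hypothetical (the function name is invented, and the cpu_*_line_size() helpers are assumed to come via the newly included <asm/cpu-features.h>):

        /* Hypothetical caller: make one user trampoline address coherent. */
        static inline void flush_user_trampoline(unsigned long addr)
        {
                unsigned long dc_lsize = cpu_dcache_line_size();
                unsigned long ic_lsize = cpu_icache_line_size();

                /* Write dirty data back, then drop the stale I-cache line. */
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        }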
@@ -166,123 +150,6 @@ static inline void invalidate_tcache_page(unsigned long addr)
         : "r" (base), \
           "i" (op));
 
-static inline void blast_dcache16(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.dcache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
-        unsigned long ws_end = current_cpu_data.dcache.ways <<
-                               current_cpu_data.dcache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
-                        cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_dcache16_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-
-        do {
-                cache16_unroll32(start,Hit_Writeback_Inv_D);
-                start += 0x200;
-        } while (start < end);
-}
-
-static inline void blast_dcache16_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
-        unsigned long ws_end = current_cpu_data.dcache.ways <<
-                               current_cpu_data.dcache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
-                        cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_icache16(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.icache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
-        unsigned long ws_end = current_cpu_data.icache.ways <<
-                               current_cpu_data.icache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
-                        cache16_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_icache16_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-
-        do {
-                cache16_unroll32(start,Hit_Invalidate_I);
-                start += 0x200;
-        } while (start < end);
-}
-
-static inline void blast_icache16_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
-        unsigned long ws_end = current_cpu_data.icache.ways <<
-                               current_cpu_data.icache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
-                        cache16_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_scache16(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.scache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
-                        cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache16_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = page + PAGE_SIZE;
-
-        do {
-                cache16_unroll32(start,Hit_Writeback_Inv_SD);
-                start += 0x200;
-        } while (start < end);
-}
-
-static inline void blast_scache16_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x200)
-                        cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
 #define cache32_unroll32(base,op) \
         __asm__ __volatile__( \
         "       .set push                       \n" \
@@ -309,123 +176,6 @@ static inline void blast_scache16_page_indexed(unsigned long page)
         : "r" (base), \
           "i" (op));
 
-static inline void blast_dcache32(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.dcache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
-        unsigned long ws_end = current_cpu_data.dcache.ways <<
-                               current_cpu_data.dcache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
-                        cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_dcache32_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-
-        do {
-                cache32_unroll32(start,Hit_Writeback_Inv_D);
-                start += 0x400;
-        } while (start < end);
-}
-
-static inline void blast_dcache32_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
-        unsigned long ws_end = current_cpu_data.dcache.ways <<
-                               current_cpu_data.dcache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
-                        cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
-}
-
-static inline void blast_icache32(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.icache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
-        unsigned long ws_end = current_cpu_data.icache.ways <<
-                               current_cpu_data.icache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
-                        cache32_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_icache32_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-
-        do {
-                cache32_unroll32(start,Hit_Invalidate_I);
-                start += 0x400;
-        } while (start < end);
-}
-
-static inline void blast_icache32_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
-        unsigned long ws_end = current_cpu_data.icache.ways <<
-                               current_cpu_data.icache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
-                        cache32_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_scache32(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.scache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
-                        cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache32_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = page + PAGE_SIZE;
-
-        do {
-                cache32_unroll32(start,Hit_Writeback_Inv_SD);
-                start += 0x400;
-        } while (start < end);
-}
-
-static inline void blast_scache32_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x400)
-                        cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
 #define cache64_unroll32(base,op) \
         __asm__ __volatile__( \
         "       .set push                       \n" \
@@ -452,84 +202,6 @@ static inline void blast_scache32_page_indexed(unsigned long page)
         : "r" (base), \
           "i" (op));
 
-static inline void blast_icache64(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.icache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
-        unsigned long ws_end = current_cpu_data.icache.ways <<
-                               current_cpu_data.icache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x800)
-                        cache64_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_icache64_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-
-        do {
-                cache64_unroll32(start,Hit_Invalidate_I);
-                start += 0x800;
-        } while (start < end);
-}
-
-static inline void blast_icache64_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
-        unsigned long ws_end = current_cpu_data.icache.ways <<
-                               current_cpu_data.icache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x800)
-                        cache64_unroll32(addr|ws,Index_Invalidate_I);
-}
-
-static inline void blast_scache64(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.scache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x800)
-                        cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache64_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = page + PAGE_SIZE;
-
-        do {
-                cache64_unroll32(start,Hit_Writeback_Inv_SD);
-                start += 0x800;
-        } while (start < end);
-}
-
-static inline void blast_scache64_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x800)
-                        cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
 #define cache128_unroll32(base,op) \
         __asm__ __volatile__( \
         "       .set push                       \n" \
@@ -556,43 +228,80 @@ static inline void blast_scache64_page_indexed(unsigned long page)
         : "r" (base), \
           "i" (op));
 
-static inline void blast_scache128(void)
-{
-        unsigned long start = INDEX_BASE;
-        unsigned long end = start + current_cpu_data.scache.waysize;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x1000)
-                        cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
-
-static inline void blast_scache128_page(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = page + PAGE_SIZE;
-
-        do {
-                cache128_unroll32(start,Hit_Writeback_Inv_SD);
-                start += 0x1000;
-        } while (start < end);
-}
-
-static inline void blast_scache128_page_indexed(unsigned long page)
-{
-        unsigned long start = page;
-        unsigned long end = start + PAGE_SIZE;
-        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
-        unsigned long ws_end = current_cpu_data.scache.ways <<
-                               current_cpu_data.scache.waybit;
-        unsigned long ws, addr;
-
-        for (ws = 0; ws < ws_end; ws += ws_inc)
-                for (addr = start; addr < end; addr += 0x1000)
-                        cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
-}
+/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize(void) \
+{ \
+        unsigned long start = INDEX_BASE; \
+        unsigned long end = start + current_cpu_data.desc.waysize; \
+        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+        unsigned long ws_end = current_cpu_data.desc.ways << \
+                               current_cpu_data.desc.waybit; \
+        unsigned long ws, addr; \
+ \
+        for (ws = 0; ws < ws_end; ws += ws_inc) \
+                for (addr = start; addr < end; addr += lsize * 32) \
+                        cache##lsize##_unroll32(addr|ws,indexop); \
+} \
+ \
+static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+{ \
+        unsigned long start = page; \
+        unsigned long end = page + PAGE_SIZE; \
+ \
+        do { \
+                cache##lsize##_unroll32(start,hitop); \
+                start += lsize * 32; \
+        } while (start < end); \
+} \
+ \
+static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+{ \
+        unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
+        unsigned long start = INDEX_BASE + (page & indexmask); \
+        unsigned long end = start + PAGE_SIZE; \
+        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+        unsigned long ws_end = current_cpu_data.desc.ways << \
+                               current_cpu_data.desc.waybit; \
+        unsigned long ws, addr; \
+ \
+        for (ws = 0; ws < ws_end; ws += ws_inc) \
+                for (addr = start; addr < end; addr += lsize * 32) \
+                        cache##lsize##_unroll32(addr|ws,indexop); \
+}
+
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
+/* build blast_xxx_range, protected_blast_xxx_range */
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
+static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+                                                    unsigned long end) \
+{ \
+        unsigned long lsize = cpu_##desc##_line_size(); \
+        unsigned long addr = start & ~(lsize - 1); \
+        unsigned long aend = (end - 1) & ~(lsize - 1); \
+        while (1) { \
+                prot##cache_op(hitop, addr); \
+                if (addr == aend) \
+                        break; \
+                addr += lsize; \
+        } \
+}
+
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+/* blast_inv_dcache_range */
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
 
 #endif /* _ASM_R4KCACHE_H */
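For reference, __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) generates the same three functions this patch deletes in their open-coded 16-byte form, stepping by lsize * 32 bytes (0x200 here) per unrolled CACHE block. One behavioural difference is visible in the macro: blast_dcache16_page_indexed() now derives its index from INDEX_BASE plus the page address masked to the way size, instead of indexing with the caller-supplied address directly. A sketch of the generated code, whitespace simplified:

        /* blast_dcache16_page() as generated (16 * 32 == 0x200 per step). */
        static inline void blast_dcache16_page(unsigned long page)
        {
                unsigned long start = page;
                unsigned long end = page + PAGE_SIZE;

                do {
                        cache16_unroll32(start, Hit_Writeback_Inv_D);
                        start += 0x200;
                } while (start < end);
        }

        /* blast_dcache16_page_indexed(): note INDEX_BASE + (page & indexmask). */
        static inline void blast_dcache16_page_indexed(unsigned long page)
        {
                unsigned long indexmask = current_cpu_data.dcache.waysize - 1;
                unsigned long start = INDEX_BASE + (page & indexmask);
                unsigned long end = start + PAGE_SIZE;
                unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
                unsigned long ws_end = current_cpu_data.dcache.ways <<
                                       current_cpu_data.dcache.waybit;
                unsigned long ws, addr;

                for (ws = 0; ws < ws_end; ws += ws_inc)
                        for (addr = start; addr < end; addr += 0x200)
                                cache16_unroll32(addr | ws, Index_Writeback_Inv_D);
        }

The range builder works the same way: __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_) expands to protected_blast_icache_range(start, end), which walks from the first to the last cache line covering [start, end) and issues protected_cache_op(Hit_Invalidate_I, addr) on each line.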