author	Atsushi Nemoto <anemo@mba.ocn.ne.jp>	2006-01-28 12:30:55 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2006-02-07 08:30:24 -0500
commit	76f072a46f179be371aa10a84c85db06a387713b (patch)
tree	3236c419f754dcc0c5356d4c47f389d8362793e9 /include
parent	3055acb07a248324c9338c0624d26a6fdd9c2bf6 (diff)
[MIPS] Build blast_cache routines from template
Build blast_xxx, blast_xxx_page and blast_xxx_page_indexed from a template. Easier to maintain and saves 300 lines. Generated code should be unchanged.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
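The template works by token pasting: each __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) invocation stamps out blast_<pfx>cache<lsize>(), blast_<pfx>cache<lsize>_page() and blast_<pfx>cache<lsize>_page_indexed(), and the loop stride lsize * 32 reproduces the constants the hand-written routines hard-coded (16 * 32 = 0x200, 32 * 32 = 0x400, 64 * 32 = 0x800, 128 * 32 = 0x1000). As a rough sketch (not part of the patch itself), the 16-byte d-cache instantiation expands to the equivalent of the first routine removed in the diff below:

/* Sketch only: approximate expansion of
 * __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16),
 * mirroring the hand-written blast_dcache16() that this commit deletes. */
static inline void blast_dcache16(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.dcache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	/* lsize * 32 == 16 * 32 == 0x200, the stride the old code used */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 16 * 32)
			cache16_unroll32(addr|ws, Index_Writeback_Inv_D);
}
/* ...plus blast_dcache16_page() and blast_dcache16_page_indexed() */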
Diffstat (limited to 'include')
-rw-r--r--	include/asm-mips/r4kcache.h	| 400
1 file changed, 50 insertions(+), 350 deletions(-)
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index a5ea9d828aee..cc53196efa40 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -166,123 +166,6 @@ static inline void invalidate_tcache_page(unsigned long addr)
166 : "r" (base), \ 166 : "r" (base), \
167 "i" (op)); 167 "i" (op));
168 168
169static inline void blast_dcache16(void)
170{
171 unsigned long start = INDEX_BASE;
172 unsigned long end = start + current_cpu_data.dcache.waysize;
173 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
174 unsigned long ws_end = current_cpu_data.dcache.ways <<
175 current_cpu_data.dcache.waybit;
176 unsigned long ws, addr;
177
178 for (ws = 0; ws < ws_end; ws += ws_inc)
179 for (addr = start; addr < end; addr += 0x200)
180 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
181}
182
183static inline void blast_dcache16_page(unsigned long page)
184{
185 unsigned long start = page;
186 unsigned long end = start + PAGE_SIZE;
187
188 do {
189 cache16_unroll32(start,Hit_Writeback_Inv_D);
190 start += 0x200;
191 } while (start < end);
192}
193
194static inline void blast_dcache16_page_indexed(unsigned long page)
195{
196 unsigned long start = page;
197 unsigned long end = start + PAGE_SIZE;
198 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
199 unsigned long ws_end = current_cpu_data.dcache.ways <<
200 current_cpu_data.dcache.waybit;
201 unsigned long ws, addr;
202
203 for (ws = 0; ws < ws_end; ws += ws_inc)
204 for (addr = start; addr < end; addr += 0x200)
205 cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
206}
207
208static inline void blast_icache16(void)
209{
210 unsigned long start = INDEX_BASE;
211 unsigned long end = start + current_cpu_data.icache.waysize;
212 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
213 unsigned long ws_end = current_cpu_data.icache.ways <<
214 current_cpu_data.icache.waybit;
215 unsigned long ws, addr;
216
217 for (ws = 0; ws < ws_end; ws += ws_inc)
218 for (addr = start; addr < end; addr += 0x200)
219 cache16_unroll32(addr|ws,Index_Invalidate_I);
220}
221
222static inline void blast_icache16_page(unsigned long page)
223{
224 unsigned long start = page;
225 unsigned long end = start + PAGE_SIZE;
226
227 do {
228 cache16_unroll32(start,Hit_Invalidate_I);
229 start += 0x200;
230 } while (start < end);
231}
232
233static inline void blast_icache16_page_indexed(unsigned long page)
234{
235 unsigned long start = page;
236 unsigned long end = start + PAGE_SIZE;
237 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
238 unsigned long ws_end = current_cpu_data.icache.ways <<
239 current_cpu_data.icache.waybit;
240 unsigned long ws, addr;
241
242 for (ws = 0; ws < ws_end; ws += ws_inc)
243 for (addr = start; addr < end; addr += 0x200)
244 cache16_unroll32(addr|ws,Index_Invalidate_I);
245}
246
247static inline void blast_scache16(void)
248{
249 unsigned long start = INDEX_BASE;
250 unsigned long end = start + current_cpu_data.scache.waysize;
251 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
252 unsigned long ws_end = current_cpu_data.scache.ways <<
253 current_cpu_data.scache.waybit;
254 unsigned long ws, addr;
255
256 for (ws = 0; ws < ws_end; ws += ws_inc)
257 for (addr = start; addr < end; addr += 0x200)
258 cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
259}
260
261static inline void blast_scache16_page(unsigned long page)
262{
263 unsigned long start = page;
264 unsigned long end = page + PAGE_SIZE;
265
266 do {
267 cache16_unroll32(start,Hit_Writeback_Inv_SD);
268 start += 0x200;
269 } while (start < end);
270}
271
272static inline void blast_scache16_page_indexed(unsigned long page)
273{
274 unsigned long start = page;
275 unsigned long end = start + PAGE_SIZE;
276 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
277 unsigned long ws_end = current_cpu_data.scache.ways <<
278 current_cpu_data.scache.waybit;
279 unsigned long ws, addr;
280
281 for (ws = 0; ws < ws_end; ws += ws_inc)
282 for (addr = start; addr < end; addr += 0x200)
283 cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
284}
285
286#define cache32_unroll32(base,op) \ 169#define cache32_unroll32(base,op) \
287 __asm__ __volatile__( \ 170 __asm__ __volatile__( \
288 " .set push \n" \ 171 " .set push \n" \
@@ -309,123 +192,6 @@ static inline void blast_scache16_page_indexed(unsigned long page)
309 : "r" (base), \ 192 : "r" (base), \
310 "i" (op)); 193 "i" (op));
311 194
312static inline void blast_dcache32(void)
313{
314 unsigned long start = INDEX_BASE;
315 unsigned long end = start + current_cpu_data.dcache.waysize;
316 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
317 unsigned long ws_end = current_cpu_data.dcache.ways <<
318 current_cpu_data.dcache.waybit;
319 unsigned long ws, addr;
320
321 for (ws = 0; ws < ws_end; ws += ws_inc)
322 for (addr = start; addr < end; addr += 0x400)
323 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
324}
325
326static inline void blast_dcache32_page(unsigned long page)
327{
328 unsigned long start = page;
329 unsigned long end = start + PAGE_SIZE;
330
331 do {
332 cache32_unroll32(start,Hit_Writeback_Inv_D);
333 start += 0x400;
334 } while (start < end);
335}
336
337static inline void blast_dcache32_page_indexed(unsigned long page)
338{
339 unsigned long start = page;
340 unsigned long end = start + PAGE_SIZE;
341 unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
342 unsigned long ws_end = current_cpu_data.dcache.ways <<
343 current_cpu_data.dcache.waybit;
344 unsigned long ws, addr;
345
346 for (ws = 0; ws < ws_end; ws += ws_inc)
347 for (addr = start; addr < end; addr += 0x400)
348 cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
349}
350
351static inline void blast_icache32(void)
352{
353 unsigned long start = INDEX_BASE;
354 unsigned long end = start + current_cpu_data.icache.waysize;
355 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
356 unsigned long ws_end = current_cpu_data.icache.ways <<
357 current_cpu_data.icache.waybit;
358 unsigned long ws, addr;
359
360 for (ws = 0; ws < ws_end; ws += ws_inc)
361 for (addr = start; addr < end; addr += 0x400)
362 cache32_unroll32(addr|ws,Index_Invalidate_I);
363}
364
365static inline void blast_icache32_page(unsigned long page)
366{
367 unsigned long start = page;
368 unsigned long end = start + PAGE_SIZE;
369
370 do {
371 cache32_unroll32(start,Hit_Invalidate_I);
372 start += 0x400;
373 } while (start < end);
374}
375
376static inline void blast_icache32_page_indexed(unsigned long page)
377{
378 unsigned long start = page;
379 unsigned long end = start + PAGE_SIZE;
380 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
381 unsigned long ws_end = current_cpu_data.icache.ways <<
382 current_cpu_data.icache.waybit;
383 unsigned long ws, addr;
384
385 for (ws = 0; ws < ws_end; ws += ws_inc)
386 for (addr = start; addr < end; addr += 0x400)
387 cache32_unroll32(addr|ws,Index_Invalidate_I);
388}
389
390static inline void blast_scache32(void)
391{
392 unsigned long start = INDEX_BASE;
393 unsigned long end = start + current_cpu_data.scache.waysize;
394 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
395 unsigned long ws_end = current_cpu_data.scache.ways <<
396 current_cpu_data.scache.waybit;
397 unsigned long ws, addr;
398
399 for (ws = 0; ws < ws_end; ws += ws_inc)
400 for (addr = start; addr < end; addr += 0x400)
401 cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
402}
403
404static inline void blast_scache32_page(unsigned long page)
405{
406 unsigned long start = page;
407 unsigned long end = page + PAGE_SIZE;
408
409 do {
410 cache32_unroll32(start,Hit_Writeback_Inv_SD);
411 start += 0x400;
412 } while (start < end);
413}
414
415static inline void blast_scache32_page_indexed(unsigned long page)
416{
417 unsigned long start = page;
418 unsigned long end = start + PAGE_SIZE;
419 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
420 unsigned long ws_end = current_cpu_data.scache.ways <<
421 current_cpu_data.scache.waybit;
422 unsigned long ws, addr;
423
424 for (ws = 0; ws < ws_end; ws += ws_inc)
425 for (addr = start; addr < end; addr += 0x400)
426 cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
427}
428
429#define cache64_unroll32(base,op) \ 195#define cache64_unroll32(base,op) \
430 __asm__ __volatile__( \ 196 __asm__ __volatile__( \
431 " .set push \n" \ 197 " .set push \n" \
@@ -452,84 +218,6 @@ static inline void blast_scache32_page_indexed(unsigned long page)
452 : "r" (base), \ 218 : "r" (base), \
453 "i" (op)); 219 "i" (op));
454 220
455static inline void blast_icache64(void)
456{
457 unsigned long start = INDEX_BASE;
458 unsigned long end = start + current_cpu_data.icache.waysize;
459 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
460 unsigned long ws_end = current_cpu_data.icache.ways <<
461 current_cpu_data.icache.waybit;
462 unsigned long ws, addr;
463
464 for (ws = 0; ws < ws_end; ws += ws_inc)
465 for (addr = start; addr < end; addr += 0x800)
466 cache64_unroll32(addr|ws,Index_Invalidate_I);
467}
468
469static inline void blast_icache64_page(unsigned long page)
470{
471 unsigned long start = page;
472 unsigned long end = start + PAGE_SIZE;
473
474 do {
475 cache64_unroll32(start,Hit_Invalidate_I);
476 start += 0x800;
477 } while (start < end);
478}
479
480static inline void blast_icache64_page_indexed(unsigned long page)
481{
482 unsigned long start = page;
483 unsigned long end = start + PAGE_SIZE;
484 unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
485 unsigned long ws_end = current_cpu_data.icache.ways <<
486 current_cpu_data.icache.waybit;
487 unsigned long ws, addr;
488
489 for (ws = 0; ws < ws_end; ws += ws_inc)
490 for (addr = start; addr < end; addr += 0x800)
491 cache64_unroll32(addr|ws,Index_Invalidate_I);
492}
493
494static inline void blast_scache64(void)
495{
496 unsigned long start = INDEX_BASE;
497 unsigned long end = start + current_cpu_data.scache.waysize;
498 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
499 unsigned long ws_end = current_cpu_data.scache.ways <<
500 current_cpu_data.scache.waybit;
501 unsigned long ws, addr;
502
503 for (ws = 0; ws < ws_end; ws += ws_inc)
504 for (addr = start; addr < end; addr += 0x800)
505 cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
506}
507
508static inline void blast_scache64_page(unsigned long page)
509{
510 unsigned long start = page;
511 unsigned long end = page + PAGE_SIZE;
512
513 do {
514 cache64_unroll32(start,Hit_Writeback_Inv_SD);
515 start += 0x800;
516 } while (start < end);
517}
518
519static inline void blast_scache64_page_indexed(unsigned long page)
520{
521 unsigned long start = page;
522 unsigned long end = start + PAGE_SIZE;
523 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
524 unsigned long ws_end = current_cpu_data.scache.ways <<
525 current_cpu_data.scache.waybit;
526 unsigned long ws, addr;
527
528 for (ws = 0; ws < ws_end; ws += ws_inc)
529 for (addr = start; addr < end; addr += 0x800)
530 cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
531}
532
533#define cache128_unroll32(base,op) \ 221#define cache128_unroll32(base,op) \
534 __asm__ __volatile__( \ 222 __asm__ __volatile__( \
535 " .set push \n" \ 223 " .set push \n" \
@@ -556,43 +244,55 @@ static inline void blast_scache64_page_indexed(unsigned long page)
556 : "r" (base), \ 244 : "r" (base), \
557 "i" (op)); 245 "i" (op));
558 246
559static inline void blast_scache128(void) 247/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
560{ 248#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
561 unsigned long start = INDEX_BASE; 249static inline void blast_##pfx##cache##lsize(void) \
562 unsigned long end = start + current_cpu_data.scache.waysize; 250{ \
563 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit; 251 unsigned long start = INDEX_BASE; \
564 unsigned long ws_end = current_cpu_data.scache.ways << 252 unsigned long end = start + current_cpu_data.desc.waysize; \
565 current_cpu_data.scache.waybit; 253 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
566 unsigned long ws, addr; 254 unsigned long ws_end = current_cpu_data.desc.ways << \
567 255 current_cpu_data.desc.waybit; \
568 for (ws = 0; ws < ws_end; ws += ws_inc) 256 unsigned long ws, addr; \
569 for (addr = start; addr < end; addr += 0x1000) 257 \
570 cache128_unroll32(addr|ws,Index_Writeback_Inv_SD); 258 for (ws = 0; ws < ws_end; ws += ws_inc) \
571} 259 for (addr = start; addr < end; addr += lsize * 32) \
572 260 cache##lsize##_unroll32(addr|ws,indexop); \
573static inline void blast_scache128_page(unsigned long page) 261} \
574{ 262 \
575 unsigned long start = page; 263static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
576 unsigned long end = page + PAGE_SIZE; 264{ \
577 265 unsigned long start = page; \
578 do { 266 unsigned long end = page + PAGE_SIZE; \
579 cache128_unroll32(start,Hit_Writeback_Inv_SD); 267 \
580 start += 0x1000; 268 do { \
581 } while (start < end); 269 cache##lsize##_unroll32(start,hitop); \
582} 270 start += lsize * 32; \
583 271 } while (start < end); \
584static inline void blast_scache128_page_indexed(unsigned long page) 272} \
585{ 273 \
586 unsigned long start = page; 274static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
587 unsigned long end = start + PAGE_SIZE; 275{ \
588 unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit; 276 unsigned long start = page; \
589 unsigned long ws_end = current_cpu_data.scache.ways << 277 unsigned long end = start + PAGE_SIZE; \
590 current_cpu_data.scache.waybit; 278 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
591 unsigned long ws, addr; 279 unsigned long ws_end = current_cpu_data.desc.ways << \
592 280 current_cpu_data.desc.waybit; \
593 for (ws = 0; ws < ws_end; ws += ws_inc) 281 unsigned long ws, addr; \
594 for (addr = start; addr < end; addr += 0x1000) 282 \
595 cache128_unroll32(addr|ws,Index_Writeback_Inv_SD); 283 for (ws = 0; ws < ws_end; ws += ws_inc) \
596} 284 for (addr = start; addr < end; addr += lsize * 32) \
285 cache##lsize##_unroll32(addr|ws,indexop); \
286}
287
288__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
289__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
290__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
291__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
292__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
293__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
294__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
295__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
296__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
597 297
598#endif /* _ASM_R4KCACHE_H */ 298#endif /* _ASM_R4KCACHE_H */
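The generated helpers are normally selected at run time once the cache geometry has been probed. The fragment below is only a hypothetical usage sketch, loosely modeled on how arch/mips/mm/c-r4k.c picks a blast routine; the function and variable names are illustrative and are not part of this patch:

#include <asm/r4kcache.h>

/* Hypothetical caller: choose a generated blast routine for the probed
 * d-cache line size.  Illustrative only. */
static void (*r4k_blast_dcache)(void);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;	/* from the 16-byte instantiation */
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;	/* from the 32-byte instantiation */
}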