Diffstat (limited to 'arch/mips/mm/c-r4k.c')
-rw-r--r--	arch/mips/mm/c-r4k.c	145
1 file changed, 82 insertions(+), 63 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5ea84bc98c6a..38223b44d962 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -16,6 +16,7 @@
 
 #include <asm/bcache.h>
 #include <asm/bootinfo.h>
+#include <asm/cache.h>
 #include <asm/cacheops.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -26,8 +27,14 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
 
-static unsigned long icache_size, dcache_size, scache_size;
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
 
 /*
  * Dummy cache handling routines for machines without boardcaches
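
The cache-size variables gain __read_mostly annotations in the hunk above. A minimal sketch of what the annotation does, assuming the usual definition of the era in <asm/cache.h>: it only relocates the variable into a dedicated linker section, so data written once at boot stops sharing cache lines with frequently-written data.

	/* Assumed typical definition from <asm/cache.h>: */
	#define __read_mostly __attribute__((__section__(".data.read_mostly")))

	/* The linker groups all such variables together, so a boot-time
	 * constant like icache_size never shares a cache line with hot,
	 * frequently-written data and never ping-pongs between CPUs. */
	static unsigned long icache_size __read_mostly;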
@@ -43,8 +50,8 @@ static struct bcache_ops no_sc_ops = {
 
 struct bcache_ops *bcops = &no_sc_ops;
 
-#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x2010)
-#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x2020)
+#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
 #define R4600_HIT_CACHEOP_WAR_IMPL					\
 do {									\
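
The widened PrID constants make the comparison self-documenting: the mask 0xfffffff0 keeps bits 31:4, so the match also pins the company-ID field (bits 23:16) to zero, as it is on legacy parts such as the R4600. A hypothetical decode, for illustration only:

	unsigned int prid    = read_c0_prid();
	unsigned int company = (prid >> 16) & 0xff; /* 0x00 on legacy R4600 */
	unsigned int imp     = (prid >>  8) & 0xff; /* 0x20 = R4600         */
	unsigned int major   = (prid >>  4) & 0x0f; /* 1 -> V1.x, 2 -> V2.x */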
@@ -190,12 +197,12 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 	if (ic_lsize == 16)
 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 	else if (ic_lsize == 32) {
-		if (TX49XX_ICACHE_INDEX_INV_WAR)
-			r4k_blast_icache_page_indexed =
-				tx49_blast_icache32_page_indexed;
-		else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 			r4k_blast_icache_page_indexed =
 				blast_icache32_r4600_v1_page_indexed;
+		else if (TX49XX_ICACHE_INDEX_INV_WAR)
+			r4k_blast_icache_page_indexed =
+				tx49_blast_icache32_page_indexed;
 		else
 			r4k_blast_icache_page_indexed =
 				blast_icache32_page_indexed;
@@ -361,24 +368,33 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 
 struct flush_cache_page_args {
 	struct vm_area_struct *vma;
-	unsigned long page;
+	unsigned long addr;
 };
 
 static inline void local_r4k_flush_cache_page(void *args)
 {
 	struct flush_cache_page_args *fcp_args = args;
 	struct vm_area_struct *vma = fcp_args->vma;
-	unsigned long page = fcp_args->page;
+	unsigned long addr = fcp_args->addr;
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 
-	page &= PAGE_MASK;
-	pgdp = pgd_offset(mm, page);
-	pmdp = pmd_offset(pgdp, page);
-	ptep = pte_offset(pmdp, page);
+	/*
+	 * If ownes no valid ASID yet, cannot possibly have gotten
+	 * this page into the cache.
+	 */
+	if (cpu_context(smp_processor_id(), mm) == 0)
+		return;
+
+	addr &= PAGE_MASK;
+	pgdp = pgd_offset(mm, addr);
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	ptep = pte_offset(pmdp, addr);
 
 	/*
 	 * If the page isn't marked valid, the page cannot possibly be
@@ -395,12 +411,12 @@ static inline void local_r4k_flush_cache_page(void *args)
 	 */
 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
 		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-			r4k_blast_dcache_page(page);
+			r4k_blast_dcache_page(addr);
 			if (exec && !cpu_icache_snoops_remote_store)
-				r4k_blast_scache_page(page);
+				r4k_blast_scache_page(addr);
 		}
 		if (exec)
-			r4k_blast_icache_page(page);
+			r4k_blast_icache_page(addr);
 
 		return;
 	}
@@ -409,36 +425,30 @@ static inline void local_r4k_flush_cache_page(void *args)
 	 * Do indexed flush, too much work to get the (possible) TLB refills
 	 * to work correctly.
 	 */
-	page = INDEX_BASE + (page & (dcache_size - 1));
+	addr = INDEX_BASE + (addr & (dcache_size - 1));
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-		r4k_blast_dcache_page_indexed(page);
+		r4k_blast_dcache_page_indexed(addr);
 		if (exec && !cpu_icache_snoops_remote_store)
-			r4k_blast_scache_page_indexed(page);
+			r4k_blast_scache_page_indexed(addr);
 	}
 	if (exec) {
 		if (cpu_has_vtag_icache) {
 			int cpu = smp_processor_id();
 
-			if (cpu_context(cpu, vma->vm_mm) != 0)
-				drop_mmu_context(vma->vm_mm, cpu);
+			if (cpu_context(cpu, mm) != 0)
+				drop_mmu_context(mm, cpu);
 		} else
-			r4k_blast_icache_page_indexed(page);
+			r4k_blast_icache_page_indexed(addr);
 	}
 }
 
-static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+	unsigned long addr, unsigned long pfn)
 {
 	struct flush_cache_page_args args;
 
-	/*
-	 * If ownes no valid ASID yet, cannot possibly have gotten
-	 * this page into the cache.
-	 */
-	if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
-		return;
-
 	args.vma = vma;
-	args.page = page;
+	args.addr = addr;
 
 	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
 }
@@ -454,16 +464,16 @@ static void r4k_flush_data_cache_page(unsigned long addr)
454} 464}
455 465
456struct flush_icache_range_args { 466struct flush_icache_range_args {
457 unsigned long start; 467 unsigned long __user start;
458 unsigned long end; 468 unsigned long __user end;
459}; 469};
460 470
461static inline void local_r4k_flush_icache_range(void *args) 471static inline void local_r4k_flush_icache_range(void *args)
462{ 472{
463 struct flush_icache_range_args *fir_args = args; 473 struct flush_icache_range_args *fir_args = args;
464 unsigned long dc_lsize = current_cpu_data.dcache.linesz; 474 unsigned long dc_lsize = cpu_dcache_line_size();
465 unsigned long ic_lsize = current_cpu_data.icache.linesz; 475 unsigned long ic_lsize = cpu_icache_line_size();
466 unsigned long sc_lsize = current_cpu_data.scache.linesz; 476 unsigned long sc_lsize = cpu_scache_line_size();
467 unsigned long start = fir_args->start; 477 unsigned long start = fir_args->start;
468 unsigned long end = fir_args->end; 478 unsigned long end = fir_args->end;
469 unsigned long addr, aend; 479 unsigned long addr, aend;
@@ -472,6 +482,7 @@ static inline void local_r4k_flush_icache_range(void *args)
 	if (end - start > dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		R4600_HIT_CACHEOP_WAR_IMPL;
 		addr = start & ~(dc_lsize - 1);
 		aend = (end - 1) & ~(dc_lsize - 1);
 
@@ -492,7 +503,7 @@ static inline void local_r4k_flush_icache_range(void *args)
 		aend = (end - 1) & ~(sc_lsize - 1);
 
 		while (1) {
-			/* Hit_Writeback_Inv_D */
+			/* Hit_Writeback_Inv_SD */
 			protected_writeback_scache_line(addr);
 			if (addr == aend)
 				break;
@@ -517,7 +528,8 @@ static inline void local_r4k_flush_icache_range(void *args)
 	}
 }
 
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void r4k_flush_icache_range(unsigned long __user start,
+	unsigned long __user end)
 {
 	struct flush_icache_range_args args;
 
@@ -525,6 +537,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.end = end;
 
 	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	instruction_hazard();
 }
 
 /*
@@ -613,7 +626,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	BUG_ON(size == 0);
 
 	if (cpu_has_subset_pcaches) {
-		unsigned long sc_lsize = current_cpu_data.scache.linesz;
+		unsigned long sc_lsize = cpu_scache_line_size();
 
 		if (size >= scache_size) {
 			r4k_blast_scache();
@@ -639,7 +652,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	if (size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+		unsigned long dc_lsize = cpu_dcache_line_size();
 
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		a = addr & ~(dc_lsize - 1);
@@ -663,7 +676,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	BUG_ON(size == 0);
 
 	if (cpu_has_subset_pcaches) {
-		unsigned long sc_lsize = current_cpu_data.scache.linesz;
+		unsigned long sc_lsize = cpu_scache_line_size();
 
 		if (size >= scache_size) {
 			r4k_blast_scache();
@@ -684,7 +697,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+		unsigned long dc_lsize = cpu_dcache_line_size();
 
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		a = addr & ~(dc_lsize - 1);
@@ -708,9 +721,9 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
  */
 static void local_r4k_flush_cache_sigtramp(void * arg)
 {
-	unsigned long ic_lsize = current_cpu_data.icache.linesz;
-	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-	unsigned long sc_lsize = current_cpu_data.scache.linesz;
+	unsigned long ic_lsize = cpu_icache_line_size();
+	unsigned long dc_lsize = cpu_dcache_line_size();
+	unsigned long sc_lsize = cpu_scache_line_size();
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
@@ -762,6 +775,7 @@ static inline void rm7k_erratum31(void)
 
 	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
 		__asm__ __volatile__ (
+			".set push\n\t"
 			".set noreorder\n\t"
 			".set mips3\n\t"
 			"cache\t%1, 0(%0)\n\t"
@@ -776,8 +790,7 @@ static inline void rm7k_erratum31(void)
776 "cache\t%1, 0x1000(%0)\n\t" 790 "cache\t%1, 0x1000(%0)\n\t"
777 "cache\t%1, 0x2000(%0)\n\t" 791 "cache\t%1, 0x2000(%0)\n\t"
778 "cache\t%1, 0x3000(%0)\n\t" 792 "cache\t%1, 0x3000(%0)\n\t"
779 ".set\tmips0\n\t" 793 ".set pop\n"
780 ".set\treorder\n\t"
781 : 794 :
782 : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill)); 795 : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
783 } 796 }
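
The .set push/.set pop pairing is the more defensive idiom: the removed lines forced the assembler back to mips0/reorder, silently assuming those were the modes in effect before the block, whereas push/pop restores whatever modes the surrounding code was actually built with. The general shape, reduced to a sketch:

	__asm__ __volatile__ (
		".set push\n\t"		/* save current assembler modes */
		".set noreorder\n\t"	/* override as needed...        */
		".set mips3\n\t"
		"cache\t%1, 0(%0)\n\t"
		".set pop\n"		/* restore the saved modes      */
		:
		: "r" (addr), "i" (Index_Store_Tag_I));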
@@ -1011,9 +1024,19 @@ static void __init probe_pcache(void)
 	 * normally they'd suffer from aliases but magic in the hardware deals
 	 * with that for us so we don't need to take care ourselves.
 	 */
-	if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
-		if (c->dcache.waysize > PAGE_SIZE)
-			c->dcache.flags |= MIPS_CACHE_ALIASES;
+	switch (c->cputype) {
+	case CPU_20KC:
+	case CPU_25KF:
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_SB1:
+		break;
+	case CPU_24K:
+		if (!(read_c0_config7() & (1 << 16)))
+	default:
+		if (c->dcache.waysize > PAGE_SIZE)
+			c->dcache.flags |= MIPS_CACHE_ALIASES;
+	}
 
 	switch (c->cputype) {
 	case CPU_20KC:
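
The rewritten alias test hinges on c->dcache.waysize, the byte span indexed within one way. When that span exceeds the page size, virtual-address bits above the page offset take part in cache indexing, so two virtual mappings of one physical page can occupy different lines. The CPU_24K config7 test reads the bit later kernels name MIPS_CONF7_AR (aliases removed in hardware). Illustrative arithmetic with assumed figures, not from this patch:

	/* Assumed example: 32K 4-way D-cache, 4K pages. */
	unsigned long waysize = 32768 / 4;	/* 8K indexed per way       */
	int aliases = waysize > 4096;		/* VA bit 12 selects the    */
						/* cache index but is not a */
						/* page-offset bit, so two  */
						/* mappings can alias       */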
@@ -1024,7 +1047,11 @@ static void __init probe_pcache(void)
 		c->icache.flags |= MIPS_CACHE_VTAG;
 		break;
 
+	case CPU_AU1000:
 	case CPU_AU1500:
+	case CPU_AU1100:
+	case CPU_AU1550:
+	case CPU_AU1200:
 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
 		break;
 	}
@@ -1102,7 +1129,6 @@ static int __init probe_scache(void)
 	return 1;
 }
 
-typedef int (*probe_func_t)(unsigned long);
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 
@@ -1110,7 +1136,6 @@ static void __init setup_scache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
-	probe_func_t probe_scache_kseg1;
 	int sc_present = 0;
 
 	/*
@@ -1123,8 +1148,7 @@ static void __init setup_scache(void)
 	case CPU_R4000MC:
 	case CPU_R4400SC:
 	case CPU_R4400MC:
-		probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
-		sc_present = probe_scache_kseg1(config);
+		sc_present = run_uncached(probe_scache);
 		if (sc_present)
 			c->options |= MIPS_CPU_CACHE_CDEX_S;
 		break;
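
run_uncached() is the generic replacement for the hand-rolled cast the removed lines performed, and is what the new <asm/cacheflush.h> include at the top of this diff pulls in. Both versions share one idea: call the probe through an uncached alias of its address, so the probe's own cache manipulation cannot corrupt the instruction stream executing it. The old trick, kept here as a sketch:

	/* What the removed lines did by hand: CKSEG1ADDR() converts a
	 * cached KSEG0 address to its uncached KSEG1 alias, so the call
	 * executes probe_scache() with caching bypassed. */
	typedef int (*probe_func_t)(unsigned long);

	probe_func_t uncached_probe =
		(probe_func_t) CKSEG1ADDR(&probe_scache);
	sc_present = uncached_probe(config);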
@@ -1198,7 +1222,7 @@ static inline void coherency_setup(void)
 	}
 }
 
-void __init ld_mmu_r4xx0(void)
+void __init r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
@@ -1206,15 +1230,11 @@ void __init ld_mmu_r4xx0(void)
 	struct cpuinfo_mips *c = &current_cpu_data;
 
 	/* Default cache error handler for R4000 and R5000 family */
-	memcpy((void *)(CAC_BASE   + 0x100), &except_vec2_generic, 0x80);
-	memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
+	set_uncached_handler (0x100, &except_vec2_generic, 0x80);
 
 	probe_pcache();
 	setup_scache();
 
-	if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
-		c->dcache.flags |= MIPS_CACHE_ALIASES;
-
 	r4k_blast_dcache_page_setup();
 	r4k_blast_dcache_page_indexed_setup();
 	r4k_blast_dcache_setup();
@@ -1252,9 +1272,8 @@ void __init ld_mmu_r4xx0(void)
 	_dma_cache_inv = r4k_dma_cache_inv;
 #endif
 
-	__flush_cache_all();
-	coherency_setup();
-
 	build_clear_page();
 	build_copy_page();
+	local_r4k___flush_cache_all(NULL);
+	coherency_setup();
 }