Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/Makefile          |    2
 arch/mips/mm/c-r3k.c           |    6
 arch/mips/mm/c-r4k.c           |  145
 arch/mips/mm/c-sb1.c           |   10
 arch/mips/mm/c-tx39.c          |   16
 arch/mips/mm/cache.c           |  106
 arch/mips/mm/cerr-sb1.c        |   54
 arch/mips/mm/cex-sb1.S         |    5
 arch/mips/mm/dma-coherent.c    |    6
 arch/mips/mm/dma-ip27.c        |    4
 arch/mips/mm/dma-ip32.c        |    4
 arch/mips/mm/dma-noncoherent.c |   50
 arch/mips/mm/fault.c           |   17
 arch/mips/mm/highmem.c         |   19
 arch/mips/mm/init.c            |   34
 arch/mips/mm/ioremap.c         |   32
 arch/mips/mm/pg-r4k.c          |   21
 arch/mips/mm/pg-sb1.c          |   65
 arch/mips/mm/pgtable-32.c      |   36
 arch/mips/mm/sc-rm7k.c         |   39
 arch/mips/mm/tlb-andes.c       |    4
 arch/mips/mm/tlb-r4k.c         |   70
 arch/mips/mm/tlb-sb1.c         |  376
 arch/mips/mm/tlbex.c           |  245
 24 files changed, 513 insertions(+), 853 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index b56a0abdc3d4..b0178da019f0 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_CPU_R8000)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r8k.o
 obj-$(CONFIG_CPU_RM7000)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
 obj-$(CONFIG_CPU_RM9000)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
 obj-$(CONFIG_CPU_SB1)		+= c-sb1.o cerr-sb1.o cex-sb1.o pg-sb1.o \
-			   tlb-sb1.o
+			   tlb-r4k.o
 obj-$(CONFIG_CPU_TX39XX)	+= c-tx39.o pg-r4k.o tlb-r3k.o
 obj-$(CONFIG_CPU_TX49XX)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
 obj-$(CONFIG_CPU_VR41XX)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index c659f99eb39a..27f4fa25e8c9 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -221,12 +221,14 @@ static inline unsigned long get_phys_page (unsigned long addr,
 					   struct mm_struct *mm)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long physpage;
 
 	pgd = pgd_offset(mm, addr);
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
 	pte = pte_offset(pmd, addr);
 
 	if ((physpage = pte_val(*pte)) & _PAGE_VALID)
@@ -317,7 +319,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
 	r3k_flush_dcache_range(start, start + size);
 }
 
-void __init ld_mmu_r23000(void)
+void __init r3k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
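
The hunk above threads the new pud level into the page-table walk. A minimal sketch of the resulting four-level descent, using the standard accessors of this kernel generation (walk_to_pte is a hypothetical helper, not part of the patch):

    /* Hypothetical helper showing the pgd -> pud -> pmd -> pte descent
     * the patch introduces at every walk site. */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
    	pgd_t *pgd = pgd_offset(mm, addr);	/* top-level entry */
    	pud_t *pud = pud_offset(pgd, addr);	/* newly added level */
    	pmd_t *pmd = pmd_offset(pud, addr);
    	return pte_offset(pmd, addr);		/* leaf entry */
    }

On 32-bit MIPS the pud level is folded away by the generic headers, so pud_offset() compiles to nothing; the walk merely becomes uniform across configurations.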
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5ea84bc98c6a..38223b44d962 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -16,6 +16,7 @@
 
 #include <asm/bcache.h>
 #include <asm/bootinfo.h>
+#include <asm/cache.h>
 #include <asm/cacheops.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -26,8 +27,14 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
 
-static unsigned long icache_size, dcache_size, scache_size;
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
 
 /*
  * Dummy cache handling routines for machines without boardcaches
@@ -43,8 +50,8 @@ static struct bcache_ops no_sc_ops = {
 
 struct bcache_ops *bcops = &no_sc_ops;
 
-#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x2010)
-#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x2020)
+#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
 #define R4600_HIT_CACHEOP_WAR_IMPL					\
 do {									\
@@ -190,12 +197,12 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 	if (ic_lsize == 16)
 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 	else if (ic_lsize == 32) {
-		if (TX49XX_ICACHE_INDEX_INV_WAR)
-			r4k_blast_icache_page_indexed =
-				tx49_blast_icache32_page_indexed;
-		else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 			r4k_blast_icache_page_indexed =
 				blast_icache32_r4600_v1_page_indexed;
+		else if (TX49XX_ICACHE_INDEX_INV_WAR)
+			r4k_blast_icache_page_indexed =
+				tx49_blast_icache32_page_indexed;
 		else
 			r4k_blast_icache_page_indexed =
 				blast_icache32_page_indexed;
@@ -361,24 +368,33 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 
 struct flush_cache_page_args {
 	struct vm_area_struct *vma;
-	unsigned long page;
+	unsigned long addr;
 };
 
 static inline void local_r4k_flush_cache_page(void *args)
 {
 	struct flush_cache_page_args *fcp_args = args;
 	struct vm_area_struct *vma = fcp_args->vma;
-	unsigned long page = fcp_args->page;
+	unsigned long addr = fcp_args->addr;
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 
-	page &= PAGE_MASK;
-	pgdp = pgd_offset(mm, page);
-	pmdp = pmd_offset(pgdp, page);
-	ptep = pte_offset(pmdp, page);
+	/*
+	 * If ownes no valid ASID yet, cannot possibly have gotten
+	 * this page into the cache.
+	 */
+	if (cpu_context(smp_processor_id(), mm) == 0)
+		return;
+
+	addr &= PAGE_MASK;
+	pgdp = pgd_offset(mm, addr);
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	ptep = pte_offset(pmdp, addr);
 
 	/*
 	 * If the page isn't marked valid, the page cannot possibly be
@@ -395,12 +411,12 @@ static inline void local_r4k_flush_cache_page(void *args)
 	 */
 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
 		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-			r4k_blast_dcache_page(page);
+			r4k_blast_dcache_page(addr);
 			if (exec && !cpu_icache_snoops_remote_store)
-				r4k_blast_scache_page(page);
+				r4k_blast_scache_page(addr);
 		}
 		if (exec)
-			r4k_blast_icache_page(page);
+			r4k_blast_icache_page(addr);
 
 		return;
 	}
@@ -409,36 +425,30 @@ static inline void local_r4k_flush_cache_page(void *args)
 	 * Do indexed flush, too much work to get the (possible) TLB refills
 	 * to work correctly.
 	 */
-	page = INDEX_BASE + (page & (dcache_size - 1));
+	addr = INDEX_BASE + (addr & (dcache_size - 1));
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-		r4k_blast_dcache_page_indexed(page);
+		r4k_blast_dcache_page_indexed(addr);
 		if (exec && !cpu_icache_snoops_remote_store)
-			r4k_blast_scache_page_indexed(page);
+			r4k_blast_scache_page_indexed(addr);
 	}
 	if (exec) {
 		if (cpu_has_vtag_icache) {
 			int cpu = smp_processor_id();
 
-			if (cpu_context(cpu, vma->vm_mm) != 0)
-				drop_mmu_context(vma->vm_mm, cpu);
+			if (cpu_context(cpu, mm) != 0)
+				drop_mmu_context(mm, cpu);
 		} else
-			r4k_blast_icache_page_indexed(page);
+			r4k_blast_icache_page_indexed(addr);
 	}
 }
 
-static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+	unsigned long addr, unsigned long pfn)
 {
 	struct flush_cache_page_args args;
 
-	/*
-	 * If ownes no valid ASID yet, cannot possibly have gotten
-	 * this page into the cache.
-	 */
-	if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
-		return;
-
 	args.vma = vma;
-	args.page = page;
+	args.addr = addr;
 
 	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
 }
@@ -454,16 +464,16 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 }
 
 struct flush_icache_range_args {
-	unsigned long start;
-	unsigned long end;
+	unsigned long __user start;
+	unsigned long __user end;
 };
 
 static inline void local_r4k_flush_icache_range(void *args)
 {
 	struct flush_icache_range_args *fir_args = args;
-	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-	unsigned long ic_lsize = current_cpu_data.icache.linesz;
-	unsigned long sc_lsize = current_cpu_data.scache.linesz;
+	unsigned long dc_lsize = cpu_dcache_line_size();
+	unsigned long ic_lsize = cpu_icache_line_size();
+	unsigned long sc_lsize = cpu_scache_line_size();
 	unsigned long start = fir_args->start;
 	unsigned long end = fir_args->end;
 	unsigned long addr, aend;
@@ -472,6 +482,7 @@ static inline void local_r4k_flush_icache_range(void *args)
 	if (end - start > dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		R4600_HIT_CACHEOP_WAR_IMPL;
 		addr = start & ~(dc_lsize - 1);
 		aend = (end - 1) & ~(dc_lsize - 1);
 
@@ -492,7 +503,7 @@ static inline void local_r4k_flush_icache_range(void *args)
 		aend = (end - 1) & ~(sc_lsize - 1);
 
 		while (1) {
-			/* Hit_Writeback_Inv_D */
+			/* Hit_Writeback_Inv_SD */
 			protected_writeback_scache_line(addr);
 			if (addr == aend)
 				break;
@@ -517,7 +528,8 @@ static inline void local_r4k_flush_icache_range(void *args)
 	}
 }
 
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void r4k_flush_icache_range(unsigned long __user start,
+	unsigned long __user end)
 {
 	struct flush_icache_range_args args;
 
@@ -525,6 +537,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.end = end;
 
 	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	instruction_hazard();
 }
 
 /*
@@ -613,7 +626,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	BUG_ON(size == 0);
 
 	if (cpu_has_subset_pcaches) {
-		unsigned long sc_lsize = current_cpu_data.scache.linesz;
+		unsigned long sc_lsize = cpu_scache_line_size();
 
 		if (size >= scache_size) {
 			r4k_blast_scache();
@@ -639,7 +652,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	if (size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+		unsigned long dc_lsize = cpu_dcache_line_size();
 
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		a = addr & ~(dc_lsize - 1);
@@ -663,7 +676,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	BUG_ON(size == 0);
 
 	if (cpu_has_subset_pcaches) {
-		unsigned long sc_lsize = current_cpu_data.scache.linesz;
+		unsigned long sc_lsize = cpu_scache_line_size();
 
 		if (size >= scache_size) {
 			r4k_blast_scache();
@@ -684,7 +697,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+		unsigned long dc_lsize = cpu_dcache_line_size();
 
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		a = addr & ~(dc_lsize - 1);
@@ -708,9 +721,9 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
  */
 static void local_r4k_flush_cache_sigtramp(void * arg)
 {
-	unsigned long ic_lsize = current_cpu_data.icache.linesz;
-	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-	unsigned long sc_lsize = current_cpu_data.scache.linesz;
+	unsigned long ic_lsize = cpu_icache_line_size();
+	unsigned long dc_lsize = cpu_dcache_line_size();
+	unsigned long sc_lsize = cpu_scache_line_size();
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
@@ -762,6 +775,7 @@ static inline void rm7k_erratum31(void)
 
 	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
 		__asm__ __volatile__ (
+			".set push\n\t"
 			".set noreorder\n\t"
 			".set mips3\n\t"
 			"cache\t%1, 0(%0)\n\t"
@@ -776,8 +790,7 @@ static inline void rm7k_erratum31(void)
 			"cache\t%1, 0x1000(%0)\n\t"
 			"cache\t%1, 0x2000(%0)\n\t"
 			"cache\t%1, 0x3000(%0)\n\t"
-			".set\tmips0\n\t"
-			".set\treorder\n\t"
+			".set pop\n"
 			:
 			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
 	}
@@ -1011,9 +1024,19 @@ static void __init probe_pcache(void)
 	 * normally they'd suffer from aliases but magic in the hardware deals
 	 * with that for us so we don't need to take care ourselves.
 	 */
-	if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
-		if (c->dcache.waysize > PAGE_SIZE)
-			c->dcache.flags |= MIPS_CACHE_ALIASES;
+	switch (c->cputype) {
+	case CPU_20KC:
+	case CPU_25KF:
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_SB1:
+		break;
+	case CPU_24K:
+		if (!(read_c0_config7() & (1 << 16)))
+	default:
+		if (c->dcache.waysize > PAGE_SIZE)
+			c->dcache.flags |= MIPS_CACHE_ALIASES;
+	}
 
 	switch (c->cputype) {
 	case CPU_20KC:
@@ -1024,7 +1047,11 @@ static void __init probe_pcache(void)
 		c->icache.flags |= MIPS_CACHE_VTAG;
 		break;
 
+	case CPU_AU1000:
 	case CPU_AU1500:
+	case CPU_AU1100:
+	case CPU_AU1550:
+	case CPU_AU1200:
 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
 		break;
 	}
@@ -1102,7 +1129,6 @@ static int __init probe_scache(void)
 	return 1;
 }
 
-typedef int (*probe_func_t)(unsigned long);
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 
@@ -1110,7 +1136,6 @@ static void __init setup_scache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int config = read_c0_config();
-	probe_func_t probe_scache_kseg1;
 	int sc_present = 0;
 
 	/*
@@ -1123,8 +1148,7 @@ static void __init setup_scache(void)
 	case CPU_R4000MC:
 	case CPU_R4400SC:
 	case CPU_R4400MC:
-		probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
-		sc_present = probe_scache_kseg1(config);
+		sc_present = run_uncached(probe_scache);
 		if (sc_present)
 			c->options |= MIPS_CPU_CACHE_CDEX_S;
 		break;
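
run_uncached() replaces the hand-rolled cast through CKSEG1 above. What the removed code did, in effect, was rewrite the function pointer's segment bits so the probe executes from the uncached window; a sketch of that arithmetic for 32-bit kernel addresses (the helper name is illustrative, and it assumes the callee lives in KSEG0, 0x80000000-0x9fffffff):

    typedef int (*probe_func_t)(unsigned long);

    /* Call fn through its KSEG1 alias: same physical address, but
     * fetched and executed with the caches bypassed. */
    static int call_uncached(int (*fn)(unsigned long), unsigned long config)
    {
    	probe_func_t uncached =
    		(probe_func_t)(((unsigned long)fn & 0x1fffffff) | 0xa0000000);

    	return uncached(config);
    }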
@@ -1198,7 +1222,7 @@ static inline void coherency_setup(void)
 	}
 }
 
-void __init ld_mmu_r4xx0(void)
+void __init r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
@@ -1206,15 +1230,11 @@ void __init ld_mmu_r4xx0(void)
 	struct cpuinfo_mips *c = &current_cpu_data;
 
 	/* Default cache error handler for R4000 and R5000 family */
-	memcpy((void *)(CAC_BASE   + 0x100), &except_vec2_generic, 0x80);
-	memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
+	set_uncached_handler (0x100, &except_vec2_generic, 0x80);
 
 	probe_pcache();
 	setup_scache();
 
-	if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
-		c->dcache.flags |= MIPS_CACHE_ALIASES;
-
 	r4k_blast_dcache_page_setup();
 	r4k_blast_dcache_page_indexed_setup();
 	r4k_blast_dcache_setup();
@@ -1252,9 +1272,8 @@ void __init ld_mmu_r4xx0(void)
 	_dma_cache_inv = r4k_dma_cache_inv;
 #endif
 
-	__flush_cache_all();
-	coherency_setup();
-
 	build_clear_page();
 	build_copy_page();
+	local_r4k___flush_cache_all(NULL);
+	coherency_setup();
 }
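
set_uncached_handler() folds the pair of memcpy()s that previously installed the cache-error vector at both its cached and uncached aliases. Presumably its body mirrors the code it replaces; a sketch under that assumption (the real definition lives elsewhere in the tree):

    /* Sketch: install an exception handler at `offset' from both the
     * cached and uncached exception vector bases. */
    static void set_uncached_handler_sketch(unsigned long offset,
    	void *handler, unsigned long size)
    {
    	memcpy((void *)(CAC_BASE + offset), handler, size);
    	memcpy((void *)(UNCAC_BASE + offset), handler, size);
    }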
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 502f68c664b2..2f08b535f20e 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -235,7 +235,7 @@ static inline void __sb1_flush_icache_range(unsigned long start,
 /*
  * Invalidate all caches on this CPU
  */
-static void local_sb1___flush_cache_all(void)
+static void __attribute_used__ local_sb1___flush_cache_all(void)
 {
 	__sb1_writeback_inv_dcache_all();
 	__sb1_flush_icache_all();
@@ -492,19 +492,17 @@ static __init void probe_cache_sizes(void)
 }
 
 /*
- * This is called from loadmmu.c.  We have to set up all the
+ * This is called from cache.c.  We have to set up all the
  * memory management function pointers, as well as initialize
  * the caches and tlbs
  */
-void ld_mmu_sb1(void)
+void sb1_cache_init(void)
 {
 	extern char except_vec2_sb1;
 	extern char handle_vec2_sb1;
 
 	/* Special cache error handler for SB1 */
-	memcpy((void *)(CAC_BASE   + 0x100), &except_vec2_sb1, 0x80);
-	memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80);
-	memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);
+	set_uncached_handler (0x100, &except_vec2_sb1, 0x80);
 
 	probe_cache_sizes();
 
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ff5afab64b2f..0a97a9434eba 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -167,15 +167,16 @@ static void tx39_flush_cache_mm(struct mm_struct *mm)
 static void tx39_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	int exec;
 
-	if (!cpu_has_dc_aliases)
+	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
 		return;
 
-	if (cpu_context(smp_processor_id(), mm) != 0) {
+	exec = vma->vm_flags & VM_EXEC;
+	if (cpu_has_dc_aliases || exec)
 		tx39_blast_dcache();
+	if (exec)
 		tx39_blast_icache();
-	}
 }
 
 static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
@@ -183,6 +184,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 
@@ -195,7 +197,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
 
 	page &= PAGE_MASK;
 	pgdp = pgd_offset(mm, page);
-	pmdp = pmd_offset(pgdp, page);
+	pudp = pud_offset(pgdp, page);
+	pmdp = pmd_offset(pudp, page);
 	ptep = pte_offset(pmdp, page);
 
 	/*
@@ -407,7 +410,7 @@ static __init void tx39_probe_cache(void)
 	}
 }
 
-void __init ld_mmu_tx39(void)
+void __init tx39_cache_init(void)
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
@@ -490,4 +493,5 @@ void __init ld_mmu_tx39(void)
 
 	build_clear_page();
 	build_copy_page();
+	tx39h_flush_icache_all();
 }
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 1d95cdb77bed..314701a66b13 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -23,8 +23,10 @@ void (*__flush_cache_all)(void);
 void (*flush_cache_mm)(struct mm_struct *mm);
 void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
+	unsigned long pfn);
+void (*flush_icache_range)(unsigned long __user start,
+	unsigned long __user end);
 void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
 
 /* MIPS specific cache operations */
@@ -32,6 +34,8 @@ void (*flush_cache_sigtramp)(unsigned long addr);
 void (*flush_data_cache_page)(unsigned long addr);
 void (*flush_icache_all)(void);
 
+EXPORT_SYMBOL(flush_data_cache_page);
+
 #ifdef CONFIG_DMA_NONCOHERENT
 
 /* DMA cache operations. */
@@ -49,10 +53,12 @@ EXPORT_SYMBOL(_dma_cache_inv);
  * We could optimize the case where the cache argument is not BCACHE but
  * that seems very atypical use ...
  */
-asmlinkage int sys_cacheflush(unsigned long addr, unsigned long int bytes,
-	unsigned int cache)
+asmlinkage int sys_cacheflush(unsigned long __user addr,
+	unsigned long bytes, unsigned int cache)
 {
-	if (!access_ok(VERIFY_WRITE, (void *) addr, bytes))
+	if (bytes == 0)
+		return 0;
+	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
 		return -EFAULT;
 
 	flush_icache_range(addr, addr + bytes);
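
This syscall is reachable from userland as cacheflush(2) on MIPS; a hedged usage sketch (requires a MIPS target; BCACHE selects both caches via <sys/cachectl.h>):

    #include <stdio.h>
    #include <sys/cachectl.h>

    /* After generating code into buf at runtime, write it back from the
     * D-cache and invalidate the I-cache before executing it. */
    int make_code_visible(char *buf, int nbytes)
    {
    	if (cacheflush(buf, nbytes, BCACHE) < 0) {
    		perror("cacheflush");
    		return -1;
    	}
    	return 0;
    }

Note that the new bytes == 0 early-out above makes zero-length flushes succeed unconditionally, even with an otherwise invalid pointer.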
@@ -100,58 +106,48 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 }
 
-extern void ld_mmu_r23000(void);
-extern void ld_mmu_r4xx0(void);
-extern void ld_mmu_tx39(void);
-extern void ld_mmu_r6000(void);
-extern void ld_mmu_tfp(void);
-extern void ld_mmu_andes(void);
-extern void ld_mmu_sb1(void);
+#define __weak __attribute__((weak))
+
+static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
 
 void __init cpu_cache_init(void)
 {
-	if (cpu_has_4ktlb) {
-#if defined(CONFIG_CPU_R4X00) || defined(CONFIG_CPU_VR41XX) || \
-    defined(CONFIG_CPU_R4300) || defined(CONFIG_CPU_R5000)  || \
-    defined(CONFIG_CPU_NEVADA) || defined(CONFIG_CPU_R5432) || \
-    defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_MIPS32) || \
-    defined(CONFIG_CPU_MIPS64) || defined(CONFIG_CPU_TX49XX) || \
-    defined(CONFIG_CPU_RM7000) || defined(CONFIG_CPU_RM9000)
-		ld_mmu_r4xx0();
-#endif
-	} else switch (current_cpu_data.cputype) {
-#ifdef CONFIG_CPU_R3000
-	case CPU_R2000:
-	case CPU_R3000:
-	case CPU_R3000A:
-	case CPU_R3081E:
-		ld_mmu_r23000();
-		break;
-#endif
-#ifdef CONFIG_CPU_TX39XX
-	case CPU_TX3912:
-	case CPU_TX3922:
-	case CPU_TX3927:
-		ld_mmu_tx39();
-		break;
-#endif
-#ifdef CONFIG_CPU_R10000
-	case CPU_R10000:
-	case CPU_R12000:
-		ld_mmu_r4xx0();
-		break;
-#endif
-#ifdef CONFIG_CPU_SB1
-	case CPU_SB1:
-		ld_mmu_sb1();
-		break;
-#endif
-
-	case CPU_R8000:
-		panic("R8000 is unsupported");
-		break;
-
-	default:
-		panic("Yeee, unsupported cache architecture.");
-	}
+	if (cpu_has_3k_cache) {
+		extern void __weak r3k_cache_init(void);
+
+		r3k_cache_init();
+		return;
+	}
+	if (cpu_has_6k_cache) {
+		extern void __weak r6k_cache_init(void);
+
+		r6k_cache_init();
+		return;
+	}
+	if (cpu_has_4k_cache) {
+		extern void __weak r4k_cache_init(void);
+
+		r4k_cache_init();
+		return;
+	}
+	if (cpu_has_8k_cache) {
+		extern void __weak r8k_cache_init(void);
+
+		r8k_cache_init();
+		return;
+	}
+	if (cpu_has_tx39_cache) {
+		extern void __weak tx39_cache_init(void);
+
+		tx39_cache_init();
+		return;
+	}
+	if (cpu_has_sb1_cache) {
+		extern void __weak sb1_cache_init(void);
+
+		sb1_cache_init();
+		return;
+	}
+
+	panic(cache_panic);
 }
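
The rewritten cpu_cache_init() leans on weak extern declarations: only the *_cache_init() variant actually compiled into the kernel has a definition, and the matching cpu_has_* predicate keeps an undefined one from ever being called. A standalone sketch of the same linker trick (names here are illustrative, not from the kernel):

    #include <stdio.h>

    #define __weak __attribute__((weak))

    void __weak optional_init(void);	/* no definition provided here */

    void required_init(void)
    {
    	puts("required variant initialized");
    }

    void dispatch_init(int have_optional)
    {
    	/* A weak, undefined symbol evaluates as a null pointer, so it
    	 * can be tested before the call; the kernel instead relies on
    	 * the cpu_has_* test guaranteeing a definition exists. */
    	if (have_optional && optional_init)
    		optional_init();
    	else
    		required_init();
    }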
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 7166ffe63502..1cf3c6006ccd 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -19,13 +19,19 @@
 #include <linux/sched.h>
 #include <asm/mipsregs.h>
 #include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
 
-#ifndef CONFIG_SIBYTE_BUS_WATCHER
+#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
 #include <asm/io.h>
-#include <asm/sibyte/sb1250_regs.h>
 #include <asm/sibyte/sb1250_scd.h>
 #endif
 
+/*
+ * We'd like to dump the L2_ECC_TAG register on errors, but errata make
+ * that unsafe... So for now we don't.  (BCM1250/BCM112x erratum SOC-48.)
+ */
+#undef DUMP_L2_ECC_TAG_ON_ERROR
+
 /* SB1 definitions */
 
 /* XXX should come from config1 XXX */
@@ -139,12 +145,18 @@ static inline void breakout_cerrd(unsigned int val)
 static void check_bus_watcher(void)
 {
 	uint32_t status, l2_err, memio_err;
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+	uint64_t l2_tag;
+#endif
 
 	/* Destructive read, clears register and interrupt */
 	status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
 	/* Bit 31 is always on, but there's no #define for that */
 	if (status & ~(1UL << 31)) {
 		l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+		l2_tag = in64(IO_SPACE_BASE | A_L2_ECC_TAG);
+#endif
 		memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
 		prom_printf("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
 		prom_printf("\nLast recorded signature:\n");
@@ -153,6 +165,9 @@ static void check_bus_watcher(void)
 			    (int)(G_SCD_BERR_TID(status) >> 6),
 			    (int)G_SCD_BERR_RID(status),
 			    (int)G_SCD_BERR_DCODE(status));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+		prom_printf("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
+#endif
 	} else {
 		prom_printf("Bus watcher indicates no error\n");
 	}
@@ -166,6 +181,16 @@ asmlinkage void sb1_cache_error(void)
 	uint64_t cerr_dpa;
 	uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
 
+#ifdef CONFIG_SIBYTE_BW_TRACE
+	/* Freeze the trace buffer now */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+	csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IO_SPACE_BASE | A_SCD_TRACE_CFG);
+#else
+	csr_out32(M_SCD_TRACE_CFG_FREEZE, IO_SPACE_BASE | A_SCD_TRACE_CFG);
+#endif
+	prom_printf("Trace buffer frozen\n");
+#endif
+
 	prom_printf("Cache error exception on CPU %x:\n",
 		    (read_c0_prid() >> 25) & 0x7);
 
@@ -229,11 +254,19 @@ asmlinkage void sb1_cache_error(void)
 
 	check_bus_watcher();
 
-	while (1);
 	/*
-	 * This tends to make things get really ugly; let's just stall instead.
-	 *   panic("Can't handle the cache error!");
+	 * Calling panic() when a fatal cache error occurs scrambles the
+	 * state of the system (and the cache), making it difficult to
+	 * investigate after the fact.  However, if you just stall the CPU,
+	 * the other CPU may keep on running, which is typically very
+	 * undesirable.
 	 */
+#ifdef CONFIG_SB1_CERR_STALL
+	while (1)
+		;
+#else
+	panic("unhandled cache error");
+#endif
 }
 
 
@@ -434,7 +467,8 @@ static struct dc_state dc_states[] = {
 };
 
 #define DC_TAG_VALID(state) \
-    (((state) == 0xf) || ((state) == 0x13) || ((state) == 0x19) || ((state == 0x16)) || ((state) == 0x1c))
+    (((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
+     ((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
 
 static char *dc_state_str(unsigned char state)
 {
@@ -505,6 +539,7 @@ static uint32_t extract_dc(unsigned short addr, int data)
 	uint64_t datalo;
 	uint32_t datalohi, datalolo, datahi;
 	int offset;
+	char bad_ecc = 0;
 
 	for (offset = 0; offset < 4; offset++) {
 		/* Index-load-data-D */
@@ -525,8 +560,7 @@ static uint32_t extract_dc(unsigned short addr, int data)
 			ecc = dc_ecc(datalo);
 			if (ecc != datahi) {
 				int bits = 0;
-				prom_printf("  ** bad ECC (%02x %02x) ->",
-					    datahi, ecc);
+				bad_ecc |= 1 << (3-offset);
 				ecc ^= datahi;
 				while (ecc) {
 					if (ecc & 1) bits++;
@@ -537,6 +571,10 @@ static uint32_t extract_dc(unsigned short addr, int data)
 			prom_printf("  %02X-%016llX", datahi, datalo);
 		}
 		prom_printf("\n");
+		if (bad_ecc)
+			prom_printf("  dwords w/ bad ECC: %d %d %d %d\n",
+				    !!(bad_ecc & 8), !!(bad_ecc & 4),
+				    !!(bad_ecc & 2), !!(bad_ecc & 1));
 		}
 	}
 	return res;
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 2c3a23aa88c3..0e71580774ff 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -64,6 +64,10 @@ LEAF(except_vec2_sb1)
 	sd	k0,0x170($0)
 	sd	k1,0x178($0)
 
+#if CONFIG_SB1_CEX_ALWAYS_FATAL
+	j	handle_vec2_sb1
+	 nop
+#else
 	/*
 	 * M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell
 	 * if we can fast-path out of here for a h/w-recovered error.
@@ -134,6 +138,7 @@ unrecoverable:
 	/* Unrecoverable Icache or Dcache error; log it and/or fail */
 	j	handle_vec2_sb1
 	 nop
+#endif
 
 	END(except_vec2_sb1)
 
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 97a50d38c98f..f6b3c722230c 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -9,16 +9,16 @@
  */
 #include <linux/config.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/pci.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 	/* ignore region specifiers */
@@ -39,7 +39,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 	__attribute__((alias("dma_alloc_noncoherent")));
 
 EXPORT_SYMBOL(dma_alloc_coherent);
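
The int -> gfp_t change is a type-safety cleanup from this era of the kernel: callers pass allocation-context flags rather than a bare integer, as in this sketch (the helper name and buffer size are hypothetical):

    /* Allocate a coherent DMA buffer; GFP_KERNEL may sleep, so callers
     * in interrupt context would pass GFP_ATOMIC instead. */
    static void *alloc_desc_ring(struct device *dev, dma_addr_t *bus_addr)
    {
    	return dma_alloc_coherent(dev, 4096, bus_addr, GFP_KERNEL);
    }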
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index aa7c94b5d781..8da19fd22ac6 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -22,7 +22,7 @@
 	pdev_to_baddr(to_pci_dev(dev), (addr))
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 
@@ -44,7 +44,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 	__attribute__((alias("dma_alloc_noncoherent")));
 
 EXPORT_SYMBOL(dma_alloc_coherent);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index 2cbe196c35fb..a7e3072ff78d 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -37,7 +37,7 @@
 #define RAM_OFFSET_MASK	0x3fffffff
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 	/* ignore region specifiers */
@@ -61,7 +61,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 59e54f12212e..cd4ea8474f89 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -24,7 +24,7 @@
  */
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 	/* ignore region specifiers */
@@ -45,7 +45,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, int gfp)
+	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 
@@ -105,22 +105,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 {
 	unsigned long addr = (unsigned long) ptr;
 
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		dma_cache_wback(addr, size);
-		break;
-
-	case DMA_FROM_DEVICE:
-		dma_cache_inv(addr, size);
-		break;
-
-	case DMA_BIDIRECTIONAL:
-		dma_cache_wback_inv(addr, size);
-		break;
-
-	default:
-		BUG();
-	}
+	__dma_sync(addr, size, direction);
 
 	return virt_to_phys(ptr);
 }
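
The switch deleted here is exactly the direction dispatch that __dma_sync() centralizes; presumably the helper's shape mirrors the removed code, roughly (a sketch, not the tree's literal definition):

    static inline void __dma_sync(unsigned long addr, size_t size,
    	enum dma_data_direction direction)
    {
    	switch (direction) {
    	case DMA_TO_DEVICE:
    		dma_cache_wback(addr, size);	/* CPU wrote, device will read */
    		break;
    	case DMA_FROM_DEVICE:
    		dma_cache_inv(addr, size);	/* device wrote, CPU will read */
    		break;
    	case DMA_BIDIRECTIONAL:
    		dma_cache_wback_inv(addr, size);
    		break;
    	default:
    		BUG();
    	}
    }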
@@ -133,22 +118,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	unsigned long addr;
 	addr = dma_addr + PAGE_OFFSET;
 
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		//dma_cache_wback(addr, size);
-		break;
-
-	case DMA_FROM_DEVICE:
-		//dma_cache_inv(addr, size);
-		break;
-
-	case DMA_BIDIRECTIONAL:
-		//dma_cache_wback_inv(addr, size);
-		break;
-
-	default:
-		BUG();
-	}
+	//__dma_sync(addr, size, direction);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -164,10 +134,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		unsigned long addr;
 
 		addr = (unsigned long) page_address(sg->page);
-		if (addr)
+		if (addr) {
 			__dma_sync(addr + sg->offset, sg->length, direction);
-		sg->dma_address = (dma_addr_t)
-			(page_to_phys(sg->page) + sg->offset);
+			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
+				+ sg->offset;
+		}
 	}
 
 	return nents;
@@ -218,9 +189,8 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 
 	for (i = 0; i < nhwentries; i++, sg++) {
 		addr = (unsigned long) page_address(sg->page);
-		if (!addr)
-			continue;
-		dma_cache_wback_inv(addr + sg->offset, sg->length);
+		if (addr)
+			__dma_sync(addr + sg->offset, sg->length, direction);
 	}
 }
 
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ec8077c74e9c..2d9624fd10ec 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -25,6 +25,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
+#include <asm/highmem.h>		/* For VMALLOC_END */
 
 /*
  * This routine handles page faults.  It determines the address,
@@ -57,7 +58,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (unlikely(address >= VMALLOC_START))
+	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
 		goto vmalloc_fault;
 
 	/*
@@ -140,7 +141,7 @@ bad_area_nosemaphore:
 	info.si_signo = SIGSEGV;
 	info.si_errno = 0;
 	/* info.si_code has been set above */
-	info.si_addr = (void *) address;
+	info.si_addr = (void __user *) address;
 	force_sig_info(SIGSEGV, &info, tsk);
 	return;
 }
@@ -196,7 +197,7 @@ do_sigbus:
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
 	info.si_code = BUS_ADRERR;
-	info.si_addr = (void *) address;
+	info.si_addr = (void __user *) address;
 	force_sig_info(SIGBUS, &info, tsk);
 
 	return;
@@ -212,6 +213,7 @@ vmalloc_fault:
 	 */
 	int offset = __pgd_offset(address);
 	pgd_t *pgd, *pgd_k;
+	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 	pte_t *pte_k;
 
@@ -222,8 +224,13 @@ vmalloc_fault:
 		goto no_context;
 	set_pgd(pgd, *pgd_k);
 
-	pmd = pmd_offset(pgd, address);
-	pmd_k = pmd_offset(pgd_k, address);
+	pud = pud_offset(pgd, address);
+	pud_k = pud_offset(pgd_k, address);
+	if (!pud_present(*pud_k))
+		goto no_context;
+
+	pmd = pmd_offset(pud, address);
+	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd_k))
 		goto no_context;
 	set_pmd(pmd, *pmd_k);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index dd5e2e31885b..1f7b37b38f5c 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -83,6 +83,25 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 	preempt_check_resched();
 }
 
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	inc_preempt_count();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	flush_tlb_one(vaddr);
+
+	return (void*) vaddr;
+}
+
 struct page *__kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
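
kmap_atomic_pfn() takes a raw page frame number, so it can map memory with no struct page behind it; a hypothetical caller (helper name and the KM_USER0 slot are chosen for illustration):

    /* Copy out of an arbitrary physical page, e.g. one owned by a
     * device.  Pairs with __kunmap_atomic() like any atomic kmap. */
    static void read_phys_page(unsigned long pfn, void *dst, size_t len)
    {
    	void *src = kmap_atomic_pfn(pfn, KM_USER0);

    	memcpy(dst, src, len);
    	__kunmap_atomic(src, KM_USER0);
    }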
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index dc6830b10fab..f75ab748e8cd 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -83,7 +83,7 @@ pte_t *kmap_pte; | |||
83 | pgprot_t kmap_prot; | 83 | pgprot_t kmap_prot; |
84 | 84 | ||
85 | #define kmap_get_fixmap_pte(vaddr) \ | 85 | #define kmap_get_fixmap_pte(vaddr) \ |
86 | pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) | 86 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) |
87 | 87 | ||
88 | static void __init kmap_init(void) | 88 | static void __init kmap_init(void) |
89 | { | 89 | { |
@@ -96,36 +96,42 @@ static void __init kmap_init(void) | |||
96 | kmap_prot = PAGE_KERNEL; | 96 | kmap_prot = PAGE_KERNEL; |
97 | } | 97 | } |
98 | 98 | ||
99 | #ifdef CONFIG_64BIT | 99 | #ifdef CONFIG_32BIT |
100 | static void __init fixrange_init(unsigned long start, unsigned long end, | 100 | void __init fixrange_init(unsigned long start, unsigned long end, |
101 | pgd_t *pgd_base) | 101 | pgd_t *pgd_base) |
102 | { | 102 | { |
103 | pgd_t *pgd; | 103 | pgd_t *pgd; |
104 | pud_t *pud; | ||
104 | pmd_t *pmd; | 105 | pmd_t *pmd; |
105 | pte_t *pte; | 106 | pte_t *pte; |
106 | int i, j; | 107 | int i, j, k; |
107 | unsigned long vaddr; | 108 | unsigned long vaddr; |
108 | 109 | ||
109 | vaddr = start; | 110 | vaddr = start; |
110 | i = __pgd_offset(vaddr); | 111 | i = __pgd_offset(vaddr); |
111 | j = __pmd_offset(vaddr); | 112 | j = __pud_offset(vaddr); |
113 | k = __pmd_offset(vaddr); | ||
112 | pgd = pgd_base + i; | 114 | pgd = pgd_base + i; |
113 | 115 | ||
114 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | 116 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { |
115 | pmd = (pmd_t *)pgd; | 117 | pud = (pud_t *)pgd; |
116 | for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { | 118 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { |
117 | if (pmd_none(*pmd)) { | 119 | pmd = (pmd_t *)pud; |
118 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | 120 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { |
119 | set_pmd(pmd, __pmd(pte)); | 121 | if (pmd_none(*pmd)) { |
120 | if (pte != pte_offset_kernel(pmd, 0)) | 122 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
121 | BUG(); | 123 | set_pmd(pmd, __pmd(pte)); |
124 | if (pte != pte_offset_kernel(pmd, 0)) | ||
125 | BUG(); | ||
126 | } | ||
127 | vaddr += PMD_SIZE; | ||
122 | } | 128 | } |
123 | vaddr += PMD_SIZE; | 129 | k = 0; |
124 | } | 130 | } |
125 | j = 0; | 131 | j = 0; |
126 | } | 132 | } |
127 | } | 133 | } |
128 | #endif /* CONFIG_64BIT */ | 134 | #endif /* CONFIG_32BIT */ |
129 | #endif /* CONFIG_HIGHMEM */ | 135 | #endif /* CONFIG_HIGHMEM */ |
130 | 136 | ||
131 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 137 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
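With the pud level spliced in, every walk here goes pgd, then pud, then pmd, then pte, even though 32-bit MIPS folds the middle levels away at compile time. A condensed sketch of the lookup the patched kmap_get_fixmap_pte() macro expands to (the helper name is invented):

static pte_t *walk_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* kernel page tables */
	pud_t *pud = pud_offset(pgd, vaddr);	/* no-op where the pud is folded */
	pmd_t *pmd = pmd_offset(pud, vaddr);	/* no-op where the pmd is folded */

	return pte_offset_kernel(pmd, vaddr);
}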
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index adf352273f63..3101d1db5592 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c | |||
@@ -55,7 +55,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, | |||
55 | if (address >= end) | 55 | if (address >= end) |
56 | BUG(); | 56 | BUG(); |
57 | do { | 57 | do { |
58 | pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); | 58 | pte_t * pte = pte_alloc_kernel(pmd, address); |
59 | if (!pte) | 59 | if (!pte) |
60 | return -ENOMEM; | 60 | return -ENOMEM; |
61 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); | 61 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); |
@@ -77,11 +77,15 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr, | |||
77 | flush_cache_all(); | 77 | flush_cache_all(); |
78 | if (address >= end) | 78 | if (address >= end) |
79 | BUG(); | 79 | BUG(); |
80 | spin_lock(&init_mm.page_table_lock); | ||
81 | do { | 80 | do { |
81 | pud_t *pud; | ||
82 | pmd_t *pmd; | 82 | pmd_t *pmd; |
83 | pmd = pmd_alloc(&init_mm, dir, address); | 83 | |
84 | error = -ENOMEM; | 84 | error = -ENOMEM; |
85 | pud = pud_alloc(&init_mm, dir, address); | ||
86 | if (!pud) | ||
87 | break; | ||
88 | pmd = pmd_alloc(&init_mm, pud, address); | ||
85 | if (!pmd) | 89 | if (!pmd) |
86 | break; | 90 | break; |
87 | if (remap_area_pmd(pmd, address, end - address, | 91 | if (remap_area_pmd(pmd, address, end - address, |
@@ -91,21 +95,11 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr, | |||
91 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | 95 | address = (address + PGDIR_SIZE) & PGDIR_MASK; |
92 | dir++; | 96 | dir++; |
93 | } while (address && (address < end)); | 97 | } while (address && (address < end)); |
94 | spin_unlock(&init_mm.page_table_lock); | ||
95 | flush_tlb_all(); | 98 | flush_tlb_all(); |
96 | return error; | 99 | return error; |
97 | } | 100 | } |
98 | 101 | ||
99 | /* | 102 | /* |
100 | * Allow physical addresses to be fixed up to help 36 bit peripherals. | ||
101 | */ | ||
102 | phys_t __attribute__ ((weak)) | ||
103 | fixup_bigphys_addr(phys_t phys_addr, phys_t size) | ||
104 | { | ||
105 | return phys_addr; | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Generic mapping function (not visible outside): | 103 | * Generic mapping function (not visible outside): |
110 | */ | 104 | */ |
111 | 105 | ||
@@ -121,7 +115,7 @@ fixup_bigphys_addr(phys_t phys_addr, phys_t size) | |||
121 | 115 | ||
122 | #define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) | 116 | #define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) |
123 | 117 | ||
124 | void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags) | 118 | void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags) |
125 | { | 119 | { |
126 | struct vm_struct * area; | 120 | struct vm_struct * area; |
127 | unsigned long offset; | 121 | unsigned long offset; |
@@ -141,7 +135,7 @@ void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags) | |||
141 | */ | 135 | */ |
142 | if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) && | 136 | if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) && |
143 | flags == _CACHE_UNCACHED) | 137 | flags == _CACHE_UNCACHED) |
144 | return (void *) KSEG1ADDR(phys_addr); | 138 | return (void __iomem *) CKSEG1ADDR(phys_addr); |
145 | 139 | ||
146 | /* | 140 | /* |
147 | * Don't allow anybody to remap normal RAM that we're using.. | 141 | * Don't allow anybody to remap normal RAM that we're using.. |
@@ -177,10 +171,10 @@ void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags) | |||
177 | return NULL; | 171 | return NULL; |
178 | } | 172 | } |
179 | 173 | ||
180 | return (void *) (offset + (char *)addr); | 174 | return (void __iomem *) (offset + (char *)addr); |
181 | } | 175 | } |
182 | 176 | ||
183 | #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1) | 177 | #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1) |
184 | 178 | ||
185 | void __iounmap(volatile void __iomem *addr) | 179 | void __iounmap(volatile void __iomem *addr) |
186 | { | 180 | { |
@@ -190,10 +184,8 @@ void __iounmap(volatile void __iomem *addr) | |||
190 | return; | 184 | return; |
191 | 185 | ||
192 | p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr)); | 186 | p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr)); |
193 | if (!p) { | 187 | if (!p) |
194 | printk(KERN_ERR "iounmap: bad address %p\n", addr); | 188 | printk(KERN_ERR "iounmap: bad address %p\n", addr); |
195 | return; | ||
196 | } | ||
197 | 189 | ||
198 | kfree(p); | 190 | kfree(p); |
199 | } | 191 | } |
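Returning void __iomem * (and handing out CKSEG1 cookies for low, uncached physical ranges) lets sparse catch code that dereferences the cookie directly. A hypothetical driver fragment showing the intended usage; the physical base, size, register offset and error codes are all invented for illustration:

static int probe_chip(void)
{
	void __iomem *regs = __ioremap(0x1f000000, 0x1000, _CACHE_UNCACHED);
	u32 id;

	if (!regs)
		return -ENOMEM;
	id = readl(regs + 0x10);	/* always go through the accessors */
	__iounmap(regs);
	return id ? 0 : -ENODEV;
}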
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c index 9f8b16541577..f51e180072e3 100644 --- a/arch/mips/mm/pg-r4k.c +++ b/arch/mips/mm/pg-r4k.c | |||
@@ -25,7 +25,10 @@ | |||
25 | #include <asm/cpu.h> | 25 | #include <asm/cpu.h> |
26 | #include <asm/war.h> | 26 | #include <asm/war.h> |
27 | 27 | ||
28 | #define half_scache_line_size() (cpu_scache_line_size() >> 1) | 28 | #define half_scache_line_size() (cpu_scache_line_size() >> 1) |
29 | #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) | ||
30 | #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) | ||
31 | |||
29 | 32 | ||
30 | /* | 33 | /* |
31 | * Maximum sizes: | 34 | * Maximum sizes: |
@@ -198,15 +201,15 @@ static inline void build_cdex_p(void) | |||
198 | if (store_offset & (cpu_dcache_line_size() - 1)) | 201 | if (store_offset & (cpu_dcache_line_size() - 1)) |
199 | return; | 202 | return; |
200 | 203 | ||
201 | if (R4600_V1_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2010)) { | 204 | if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) { |
202 | build_nop(); | 205 | build_nop(); |
203 | build_nop(); | 206 | build_nop(); |
204 | build_nop(); | 207 | build_nop(); |
205 | build_nop(); | 208 | build_nop(); |
206 | } | 209 | } |
207 | 210 | ||
208 | if (R4600_V2_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2020)) | 211 | if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) |
209 | build_insn_word(0x8c200000); /* lw $zero, ($at) */ | 212 | build_insn_word(0x3c01a000); /* lui $at, 0xa000 */ |
210 | 213 | ||
211 | mi.c_format.opcode = cache_op; | 214 | mi.c_format.opcode = cache_op; |
212 | mi.c_format.rs = 4; /* $a0 */ | 215 | mi.c_format.rs = 4; /* $a0 */ |
@@ -361,7 +364,7 @@ void __init build_clear_page(void) | |||
361 | 364 | ||
362 | build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_clear : 0)); | 365 | build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_clear : 0)); |
363 | 366 | ||
364 | if (R4600_V2_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2020)) | 367 | if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) |
365 | build_insn_word(0x3c01a000); /* lui $at, 0xa000 */ | 368 | build_insn_word(0x3c01a000); /* lui $at, 0xa000 */ |
366 | 369 | ||
367 | dest = label(); | 370 | dest = label(); |
@@ -404,9 +407,6 @@ dest = label(); | |||
404 | 407 | ||
405 | build_jr_ra(); | 408 | build_jr_ra(); |
406 | 409 | ||
407 | flush_icache_range((unsigned long)&clear_page_array, | ||
408 | (unsigned long) epc); | ||
409 | |||
410 | BUG_ON(epc > clear_page_array + ARRAY_SIZE(clear_page_array)); | 410 | BUG_ON(epc > clear_page_array + ARRAY_SIZE(clear_page_array)); |
411 | } | 411 | } |
412 | 412 | ||
@@ -420,7 +420,7 @@ void __init build_copy_page(void) | |||
420 | 420 | ||
421 | build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_copy : 0)); | 421 | build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_copy : 0)); |
422 | 422 | ||
423 | if (R4600_V2_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2020)) | 423 | if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) |
424 | build_insn_word(0x3c01a000); /* lui $at, 0xa000 */ | 424 | build_insn_word(0x3c01a000); /* lui $at, 0xa000 */ |
425 | 425 | ||
426 | dest = label(); | 426 | dest = label(); |
@@ -482,8 +482,5 @@ dest = label(); | |||
482 | 482 | ||
483 | build_jr_ra(); | 483 | build_jr_ra(); |
484 | 484 | ||
485 | flush_icache_range((unsigned long)©_page_array, | ||
486 | (unsigned long) epc); | ||
487 | |||
488 | BUG_ON(epc > copy_page_array + ARRAY_SIZE(copy_page_array)); | 485 | BUG_ON(epc > copy_page_array + ARRAY_SIZE(copy_page_array)); |
489 | } | 486 | } |
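All three helpers above test the PRId register against the R4600 family with the low revision nibble masked off, so any v1.x or v2.x stepping matches. A sketch of the field layout those constants imply (the field names are descriptive, not taken from a header):

/*
 * read_c0_prid(), as used by cpu_is_r4600_v1_x()/_v2_x():
 *   bits 15..8  implementation, 0x20 for the R4600
 *   bits  7..4  major revision, 1 for v1.x, 2 for v2.x
 *   bits  3..0  minor revision, discarded by the 0xfffffff0 mask
 */
static inline unsigned int r4600_major_rev(void)
{
	return (read_c0_prid() >> 4) & 0xf;
}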
diff --git a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c index 1b6df7133c1e..148c65b9cd8b 100644 --- a/arch/mips/mm/pg-sb1.c +++ b/arch/mips/mm/pg-sb1.c | |||
@@ -60,7 +60,8 @@ static inline void clear_page_cpu(void *page) | |||
60 | " .set noreorder \n" | 60 | " .set noreorder \n" |
61 | #ifdef CONFIG_CPU_HAS_PREFETCH | 61 | #ifdef CONFIG_CPU_HAS_PREFETCH |
62 | " daddiu %0, %0, 128 \n" | 62 | " daddiu %0, %0, 128 \n" |
63 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%0) \n" /* Prefetch the first 4 lines */ | 63 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%0) \n" |
64 | /* Prefetch the first 4 lines */ | ||
64 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -96(%0) \n" | 65 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -96(%0) \n" |
65 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -64(%0) \n" | 66 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -64(%0) \n" |
66 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -32(%0) \n" | 67 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -32(%0) \n" |
@@ -106,7 +107,8 @@ static inline void copy_page_cpu(void *to, void *from) | |||
106 | #ifdef CONFIG_CPU_HAS_PREFETCH | 107 | #ifdef CONFIG_CPU_HAS_PREFETCH |
107 | " daddiu %0, %0, 128 \n" | 108 | " daddiu %0, %0, 128 \n" |
108 | " daddiu %1, %1, 128 \n" | 109 | " daddiu %1, %1, 128 \n" |
109 | " pref " SB1_PREF_LOAD_STREAMED_HINT ", -128(%0)\n" /* Prefetch the first 4 lines */ | 110 | " pref " SB1_PREF_LOAD_STREAMED_HINT ", -128(%0)\n" |
111 | /* Prefetch the first 4 lines */ | ||
110 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%1)\n" | 112 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%1)\n" |
111 | " pref " SB1_PREF_LOAD_STREAMED_HINT ", -96(%0)\n" | 113 | " pref " SB1_PREF_LOAD_STREAMED_HINT ", -96(%0)\n" |
112 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -96(%1)\n" | 114 | " pref " SB1_PREF_STORE_STREAMED_HINT ", -96(%1)\n" |
@@ -207,66 +209,73 @@ typedef struct dmadscr_s { | |||
207 | u64 pad_b; | 209 | u64 pad_b; |
208 | } dmadscr_t; | 210 | } dmadscr_t; |
209 | 211 | ||
210 | static dmadscr_t page_descr[NR_CPUS] __attribute__((aligned(SMP_CACHE_BYTES))); | 212 | static dmadscr_t page_descr[DM_NUM_CHANNELS] |
213 | __attribute__((aligned(SMP_CACHE_BYTES))); | ||
211 | 214 | ||
212 | void sb1_dma_init(void) | 215 | void sb1_dma_init(void) |
213 | { | 216 | { |
214 | int cpu = smp_processor_id(); | 217 | int i; |
215 | u64 base_val = CPHYSADDR(&page_descr[cpu]) | V_DM_DSCR_BASE_RINGSZ(1); | ||
216 | 218 | ||
217 | bus_writeq(base_val, | 219 | for (i = 0; i < DM_NUM_CHANNELS; i++) { |
218 | (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); | 220 | const u64 base_val = CPHYSADDR(&page_descr[i]) | |
219 | bus_writeq(base_val | M_DM_DSCR_BASE_RESET, | 221 | V_DM_DSCR_BASE_RINGSZ(1); |
220 | (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); | 222 | volatile void *base_reg = |
221 | bus_writeq(base_val | M_DM_DSCR_BASE_ENABL, | 223 | IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE)); |
222 | (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); | 224 | |
225 | __raw_writeq(base_val, base_reg); | ||
226 | __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg); | ||
227 | __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg); | ||
228 | } | ||
223 | } | 229 | } |
224 | 230 | ||
225 | void clear_page(void *page) | 231 | void clear_page(void *page) |
226 | { | 232 | { |
227 | int cpu = smp_processor_id(); | 233 | u64 to_phys = CPHYSADDR(page); |
234 | unsigned int cpu = smp_processor_id(); | ||
228 | 235 | ||
229 | /* if the page is above Kseg0, use old way */ | 236 | /* if the page is not in KSEG0, use old way */ |
230 | if ((long)KSEGX(page) != (long)CKSEG0) | 237 | if ((long)KSEGX(page) != (long)CKSEG0) |
231 | return clear_page_cpu(page); | 238 | return clear_page_cpu(page); |
232 | 239 | ||
233 | page_descr[cpu].dscr_a = CPHYSADDR(page) | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT; | 240 | page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM | |
241 | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT; | ||
234 | page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); | 242 | page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); |
235 | bus_writeq(1, (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT))); | 243 | __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT))); |
236 | 244 | ||
237 | /* | 245 | /* |
238 | * Don't really want to do it this way, but there's no | 246 | * Don't really want to do it this way, but there's no |
239 | * reliable way to delay completion detection. | 247 | * reliable way to delay completion detection. |
240 | */ | 248 | */ |
241 | while (!(bus_readq((void *)(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) & | 249 | while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG))) |
242 | M_DM_DSCR_BASE_INTERRUPT)))) | 250 | & M_DM_DSCR_BASE_INTERRUPT)) |
243 | ; | 251 | ; |
244 | bus_readq((void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); | 252 | __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); |
245 | } | 253 | } |
246 | 254 | ||
247 | void copy_page(void *to, void *from) | 255 | void copy_page(void *to, void *from) |
248 | { | 256 | { |
249 | unsigned long from_phys = CPHYSADDR(from); | 257 | u64 from_phys = CPHYSADDR(from); |
250 | unsigned long to_phys = CPHYSADDR(to); | 258 | u64 to_phys = CPHYSADDR(to); |
251 | int cpu = smp_processor_id(); | 259 | unsigned int cpu = smp_processor_id(); |
252 | 260 | ||
253 | /* if either page is above Kseg0, use old way */ | 261 | /* if either page is not in KSEG0, use old way */ |
254 | if ((long)KSEGX(to) != (long)CKSEG0 | 262 | if ((long)KSEGX(to) != (long)CKSEG0 |
255 | || (long)KSEGX(from) != (long)CKSEG0) | 263 | || (long)KSEGX(from) != (long)CKSEG0) |
256 | return copy_page_cpu(to, from); | 264 | return copy_page_cpu(to, from); |
257 | 265 | ||
258 | page_descr[cpu].dscr_a = CPHYSADDR(to_phys) | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT; | 266 | page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST | |
259 | page_descr[cpu].dscr_b = CPHYSADDR(from_phys) | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); | 267 | M_DM_DSCRA_INTERRUPT; |
260 | bus_writeq(1, (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT))); | 268 | page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE); |
269 | __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT))); | ||
261 | 270 | ||
262 | /* | 271 | /* |
263 | * Don't really want to do it this way, but there's no | 272 | * Don't really want to do it this way, but there's no |
264 | * reliable way to delay completion detection. | 273 | * reliable way to delay completion detection. |
265 | */ | 274 | */ |
266 | while (!(bus_readq((void *)(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) & | 275 | while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG))) |
267 | M_DM_DSCR_BASE_INTERRUPT)))) | 276 | & M_DM_DSCR_BASE_INTERRUPT)) |
268 | ; | 277 | ; |
269 | bus_readq((void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); | 278 | __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE))); |
270 | } | 279 | } |
271 | 280 | ||
272 | #else /* !CONFIG_SIBYTE_DMA_PAGEOPS */ | 281 | #else /* !CONFIG_SIBYTE_DMA_PAGEOPS */ |
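Besides sizing the descriptor array by DM_NUM_CHANNELS rather than NR_CPUS and moving to the __raw_writeq()/__raw_readq() accessors, the rewrite fixes a genuine bug in the old busy-wait: the completion mask was parenthesized against the register address instead of the value read back, so the loop tested the wrong quantity. The corrected pattern, as it now appears in both clear_page() and copy_page():

	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;	/* spin until the data mover flags completion */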
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index 4f07f81e8500..4a3c4919e314 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <asm/fixmap.h> | ||
13 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
14 | 15 | ||
15 | void pgd_init(unsigned long page) | 16 | void pgd_init(unsigned long page) |
@@ -29,42 +30,12 @@ void pgd_init(unsigned long page) | |||
29 | } | 30 | } |
30 | } | 31 | } |
31 | 32 | ||
32 | #ifdef CONFIG_HIGHMEM | ||
33 | static void __init fixrange_init (unsigned long start, unsigned long end, | ||
34 | pgd_t *pgd_base) | ||
35 | { | ||
36 | pgd_t *pgd; | ||
37 | pmd_t *pmd; | ||
38 | pte_t *pte; | ||
39 | int i, j; | ||
40 | unsigned long vaddr; | ||
41 | |||
42 | vaddr = start; | ||
43 | i = __pgd_offset(vaddr); | ||
44 | j = __pmd_offset(vaddr); | ||
45 | pgd = pgd_base + i; | ||
46 | |||
47 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | ||
48 | pmd = (pmd_t *)pgd; | ||
49 | for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { | ||
50 | if (pmd_none(*pmd)) { | ||
51 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | ||
52 | set_pmd(pmd, __pmd((unsigned long)pte)); | ||
53 | if (pte != pte_offset_kernel(pmd, 0)) | ||
54 | BUG(); | ||
55 | } | ||
56 | vaddr += PMD_SIZE; | ||
57 | } | ||
58 | j = 0; | ||
59 | } | ||
60 | } | ||
61 | #endif | ||
62 | |||
63 | void __init pagetable_init(void) | 33 | void __init pagetable_init(void) |
64 | { | 34 | { |
65 | #ifdef CONFIG_HIGHMEM | 35 | #ifdef CONFIG_HIGHMEM |
66 | unsigned long vaddr; | 36 | unsigned long vaddr; |
67 | pgd_t *pgd, *pgd_base; | 37 | pgd_t *pgd, *pgd_base; |
38 | pud_t *pud; | ||
68 | pmd_t *pmd; | 39 | pmd_t *pmd; |
69 | pte_t *pte; | 40 | pte_t *pte; |
70 | #endif | 41 | #endif |
@@ -90,7 +61,8 @@ void __init pagetable_init(void) | |||
90 | fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); | 61 | fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); |
91 | 62 | ||
92 | pgd = swapper_pg_dir + __pgd_offset(vaddr); | 63 | pgd = swapper_pg_dir + __pgd_offset(vaddr); |
93 | pmd = pmd_offset(pgd, vaddr); | 64 | pud = pud_offset(pgd, vaddr); |
65 | pmd = pmd_offset(pud, vaddr); | ||
94 | pte = pte_offset_kernel(pmd, vaddr); | 66 | pte = pte_offset_kernel(pmd, vaddr); |
95 | pkmap_page_table = pte; | 67 | pkmap_page_table = pte; |
96 | #endif | 68 | #endif |
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 4e92f931aaba..9e8ff8badb19 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/cacheops.h> | 15 | #include <asm/cacheops.h> |
16 | #include <asm/mipsregs.h> | 16 | #include <asm/mipsregs.h> |
17 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
18 | #include <asm/cacheflush.h> /* for run_uncached() */ | ||
18 | 19 | ||
19 | /* Primary cache parameters. */ | 20 | /* Primary cache parameters. */ |
20 | #define sc_lsize 32 | 21 | #define sc_lsize 32 |
@@ -96,25 +97,13 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size) | |||
96 | } | 97 | } |
97 | 98 | ||
98 | /* | 99 | /* |
99 | * This function is executed in the uncached segment CKSEG1. | 100 | * This function is executed in uncached address space. |
100 | * It must not touch the stack, because the stack pointer still points | ||
101 | * into CKSEG0. | ||
102 | * | ||
103 | * Three options: | ||
104 | * - Write it in assembly and guarantee that we don't use the stack. | ||
105 | * - Disable caching for CKSEG0 before calling it. | ||
106 | * - Pray that GCC doesn't randomly start using the stack. | ||
107 | * | ||
108 | * This being Linux, we obviously take the least sane of those options - | ||
109 | * following DaveM's lead in c-r4k.c | ||
110 | * | ||
111 | * It seems we get our kicks from relying on unguaranteed behaviour in GCC | ||
112 | */ | 101 | */ |
113 | static __init void __rm7k_sc_enable(void) | 102 | static __init void __rm7k_sc_enable(void) |
114 | { | 103 | { |
115 | int i; | 104 | int i; |
116 | 105 | ||
117 | set_c0_config(1 << 3); /* CONF_SE */ | 106 | set_c0_config(RM7K_CONF_SE); |
118 | 107 | ||
119 | write_c0_taglo(0); | 108 | write_c0_taglo(0); |
120 | write_c0_taghi(0); | 109 | write_c0_taghi(0); |
@@ -127,24 +116,22 @@ static __init void __rm7k_sc_enable(void) | |||
127 | ".set mips0\n\t" | 116 | ".set mips0\n\t" |
128 | ".set reorder" | 117 | ".set reorder" |
129 | : | 118 | : |
130 | : "r" (KSEG0ADDR(i)), "i" (Index_Store_Tag_SD)); | 119 | : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD)); |
131 | } | 120 | } |
132 | } | 121 | } |
133 | 122 | ||
134 | static __init void rm7k_sc_enable(void) | 123 | static __init void rm7k_sc_enable(void) |
135 | { | 124 | { |
136 | void (*func)(void) = (void *) KSEG1ADDR(&__rm7k_sc_enable); | 125 | if (read_c0_config() & RM7K_CONF_SE) |
137 | |||
138 | if (read_c0_config() & 0x08) /* CONF_SE */ | ||
139 | return; | 126 | return; |
140 | 127 | ||
141 | printk(KERN_INFO "Enabling secondary cache..."); | 128 | printk(KERN_INFO "Enabling secondary cache...\n"); |
142 | func(); | 129 | run_uncached(__rm7k_sc_enable); |
143 | } | 130 | } |
144 | 131 | ||
145 | static void rm7k_sc_disable(void) | 132 | static void rm7k_sc_disable(void) |
146 | { | 133 | { |
147 | clear_c0_config(1<<3); /* CONF_SE */ | 134 | clear_c0_config(RM7K_CONF_SE); |
148 | } | 135 | } |
149 | 136 | ||
150 | struct bcache_ops rm7k_sc_ops = { | 137 | struct bcache_ops rm7k_sc_ops = { |
@@ -158,19 +145,19 @@ void __init rm7k_sc_init(void) | |||
158 | { | 145 | { |
159 | unsigned int config = read_c0_config(); | 146 | unsigned int config = read_c0_config(); |
160 | 147 | ||
161 | if ((config >> 31) & 1) /* Bit 31 set -> no S-Cache */ | 148 | if ((config & RM7K_CONF_SC)) |
162 | return; | 149 | return; |
163 | 150 | ||
164 | printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n", | 151 | printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n", |
165 | (scache_size >> 10), sc_lsize); | 152 | (scache_size >> 10), sc_lsize); |
166 | 153 | ||
167 | if (!((config >> 3) & 1)) /* CONF_SE */ | 154 | if (!(config & RM7K_CONF_SE)) |
168 | rm7k_sc_enable(); | 155 | rm7k_sc_enable(); |
169 | 156 | ||
170 | /* | 157 | /* |
171 | * While we're at it let's deal with the tertiary cache. | 158 | * While we're at it let's deal with the tertiary cache. |
172 | */ | 159 | */ |
173 | if (!((config >> 17) & 1)) { | 160 | if (!(config & RM7K_CONF_TC)) { |
174 | 161 | ||
175 | /* | 162 | /* |
176 | * We can't enable the L3 cache yet. There may be board-specific | 163 | * We can't enable the L3 cache yet. There may be board-specific |
@@ -183,9 +170,9 @@ void __init rm7k_sc_init(void) | |||
183 | * to probe it. | 170 | * to probe it. |
184 | */ | 171 | */ |
185 | printk(KERN_INFO "Tertiary cache present, %s enabled\n", | 172 | printk(KERN_INFO "Tertiary cache present, %s enabled\n", |
186 | config&(1<<12) ? "already" : "not (yet)"); | 173 | (config & RM7K_CONF_TE) ? "already" : "not (yet)"); |
187 | 174 | ||
188 | if ((config >> 12) & 1) | 175 | if ((config & RM7K_CONF_TE)) |
189 | rm7k_tcache_enabled = 1; | 176 | rm7k_tcache_enabled = 1; |
190 | } | 177 | } |
191 | 178 | ||
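The RM7K_CONF_* names replace open-coded shifts of the Config register. Reconstructed from the code they replace (a sketch inferred from this patch, not a verified header excerpt), the bits work out as:

#define RM7K_CONF_SE	(1 <<  3)	/* secondary cache enabled */
#define RM7K_CONF_TE	(1 << 12)	/* tertiary cache enabled */
#define RM7K_CONF_TC	(1 << 17)	/* set when no tertiary cache is fitted */
#define RM7K_CONF_SC	(1 << 31)	/* set when no secondary cache is fitted */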
diff --git a/arch/mips/mm/tlb-andes.c b/arch/mips/mm/tlb-andes.c index 167e08e9661a..3f422a849c41 100644 --- a/arch/mips/mm/tlb-andes.c +++ b/arch/mips/mm/tlb-andes.c | |||
@@ -195,6 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
195 | { | 195 | { |
196 | unsigned long flags; | 196 | unsigned long flags; |
197 | pgd_t *pgdp; | 197 | pgd_t *pgdp; |
198 | pud_t *pudp; | ||
198 | pmd_t *pmdp; | 199 | pmd_t *pmdp; |
199 | pte_t *ptep; | 200 | pte_t *ptep; |
200 | int idx, pid; | 201 | int idx, pid; |
@@ -220,7 +221,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
220 | write_c0_entryhi(address | (pid)); | 221 | write_c0_entryhi(address | (pid)); |
221 | pgdp = pgd_offset(vma->vm_mm, address); | 222 | pgdp = pgd_offset(vma->vm_mm, address); |
222 | tlb_probe(); | 223 | tlb_probe(); |
223 | pmdp = pmd_offset(pgdp, address); | 224 | pudp = pud_offset(pgdp, address); |
225 | pmdp = pmd_offset(pudp, address); | ||
224 | idx = read_c0_index(); | 226 | idx = read_c0_index(); |
225 | ptep = pte_offset_map(pmdp, address); | 227 | ptep = pte_offset_map(pmdp, address); |
226 | write_c0_entrylo0(pte_val(*ptep++) >> 6); | 228 | write_c0_entrylo0(pte_val(*ptep++) >> 6); |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 59d38bc05b69..8297970f0bb1 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -21,6 +21,12 @@ | |||
21 | 21 | ||
22 | extern void build_tlb_refill_handler(void); | 22 | extern void build_tlb_refill_handler(void); |
23 | 23 | ||
24 | /* | ||
25 | * Make sure all entries differ. If they're not different | ||
26 | * MIPS32 will take revenge ... | ||
27 | */ | ||
28 | #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | ||
29 | |||
24 | /* CP0 hazard avoidance. */ | 30 | /* CP0 hazard avoidance. */ |
25 | #define BARRIER __asm__ __volatile__(".set noreorder\n\t" \ | 31 | #define BARRIER __asm__ __volatile__(".set noreorder\n\t" \ |
26 | "nop; nop; nop; nop; nop; nop;\n\t" \ | 32 | "nop; nop; nop; nop; nop; nop;\n\t" \ |
@@ -42,11 +48,8 @@ void local_flush_tlb_all(void) | |||
42 | 48 | ||
43 | /* Blast 'em all away. */ | 49 | /* Blast 'em all away. */ |
44 | while (entry < current_cpu_data.tlbsize) { | 50 | while (entry < current_cpu_data.tlbsize) { |
45 | /* | 51 | /* Make sure all entries differ. */ |
46 | * Make sure all entries differ. If they're not different | 52 | write_c0_entryhi(UNIQUE_ENTRYHI(entry)); |
47 | * MIPS32 will take revenge ... | ||
48 | */ | ||
49 | write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); | ||
50 | write_c0_index(entry); | 53 | write_c0_index(entry); |
51 | mtc0_tlbw_hazard(); | 54 | mtc0_tlbw_hazard(); |
52 | tlb_write_indexed(); | 55 | tlb_write_indexed(); |
@@ -57,12 +60,21 @@ void local_flush_tlb_all(void) | |||
57 | local_irq_restore(flags); | 60 | local_irq_restore(flags); |
58 | } | 61 | } |
59 | 62 | ||
63 | /* All entries common to a mm share an asid. To effectively flush | ||
64 | these entries, we just bump the asid. */ | ||
60 | void local_flush_tlb_mm(struct mm_struct *mm) | 65 | void local_flush_tlb_mm(struct mm_struct *mm) |
61 | { | 66 | { |
62 | int cpu = smp_processor_id(); | 67 | int cpu; |
68 | |||
69 | preempt_disable(); | ||
63 | 70 | ||
64 | if (cpu_context(cpu, mm) != 0) | 71 | cpu = smp_processor_id(); |
65 | drop_mmu_context(mm,cpu); | 72 | |
73 | if (cpu_context(cpu, mm) != 0) { | ||
74 | drop_mmu_context(mm, cpu); | ||
75 | } | ||
76 | |||
77 | preempt_enable(); | ||
66 | } | 78 | } |
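Bumping the ASID makes a whole-mm flush O(1): every TLB entry tagged with the stale ASID simply stops matching, and the TLB itself is never walked. A schematic of what drop_mmu_context() achieves, not its literal body:

	/* Schematic only; the real helper lives in mmu_context.h. */
	cpu_context(cpu, mm) = ++asid_cache(cpu);
	if ((asid_cache(cpu) & ASID_MASK) == 0)
		local_flush_tlb_all();	/* ASID space wrapped: flush once */
	write_c0_entryhi(cpu_asid(cpu, mm));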
67 | 79 | ||
68 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 80 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
@@ -75,9 +87,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
75 | unsigned long flags; | 87 | unsigned long flags; |
76 | int size; | 88 | int size; |
77 | 89 | ||
78 | local_irq_save(flags); | ||
79 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 90 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
80 | size = (size + 1) >> 1; | 91 | size = (size + 1) >> 1; |
92 | local_irq_save(flags); | ||
81 | if (size <= current_cpu_data.tlbsize/2) { | 93 | if (size <= current_cpu_data.tlbsize/2) { |
82 | int oldpid = read_c0_entryhi(); | 94 | int oldpid = read_c0_entryhi(); |
83 | int newpid = cpu_asid(cpu, mm); | 95 | int newpid = cpu_asid(cpu, mm); |
@@ -99,8 +111,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
99 | if (idx < 0) | 111 | if (idx < 0) |
100 | continue; | 112 | continue; |
101 | /* Make sure all entries differ. */ | 113 | /* Make sure all entries differ. */ |
102 | write_c0_entryhi(CKSEG0 + | 114 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
103 | (idx << (PAGE_SHIFT + 1))); | ||
104 | mtc0_tlbw_hazard(); | 115 | mtc0_tlbw_hazard(); |
105 | tlb_write_indexed(); | 116 | tlb_write_indexed(); |
106 | } | 117 | } |
@@ -118,9 +129,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
118 | unsigned long flags; | 129 | unsigned long flags; |
119 | int size; | 130 | int size; |
120 | 131 | ||
121 | local_irq_save(flags); | ||
122 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 132 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
123 | size = (size + 1) >> 1; | 133 | size = (size + 1) >> 1; |
134 | local_irq_save(flags); | ||
124 | if (size <= current_cpu_data.tlbsize / 2) { | 135 | if (size <= current_cpu_data.tlbsize / 2) { |
125 | int pid = read_c0_entryhi(); | 136 | int pid = read_c0_entryhi(); |
126 | 137 | ||
@@ -142,7 +153,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
142 | if (idx < 0) | 153 | if (idx < 0) |
143 | continue; | 154 | continue; |
144 | /* Make sure all entries differ. */ | 155 | /* Make sure all entries differ. */ |
145 | write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); | 156 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
146 | mtc0_tlbw_hazard(); | 157 | mtc0_tlbw_hazard(); |
147 | tlb_write_indexed(); | 158 | tlb_write_indexed(); |
148 | } | 159 | } |
@@ -176,7 +187,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
176 | if (idx < 0) | 187 | if (idx < 0) |
177 | goto finish; | 188 | goto finish; |
178 | /* Make sure all entries differ. */ | 189 | /* Make sure all entries differ. */ |
179 | write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); | 190 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
180 | mtc0_tlbw_hazard(); | 191 | mtc0_tlbw_hazard(); |
181 | tlb_write_indexed(); | 192 | tlb_write_indexed(); |
182 | tlbw_use_hazard(); | 193 | tlbw_use_hazard(); |
@@ -197,8 +208,8 @@ void local_flush_tlb_one(unsigned long page) | |||
197 | int oldpid, idx; | 208 | int oldpid, idx; |
198 | 209 | ||
199 | local_irq_save(flags); | 210 | local_irq_save(flags); |
200 | page &= (PAGE_MASK << 1); | ||
201 | oldpid = read_c0_entryhi(); | 211 | oldpid = read_c0_entryhi(); |
212 | page &= (PAGE_MASK << 1); | ||
202 | write_c0_entryhi(page); | 213 | write_c0_entryhi(page); |
203 | mtc0_tlbw_hazard(); | 214 | mtc0_tlbw_hazard(); |
204 | tlb_probe(); | 215 | tlb_probe(); |
@@ -208,7 +219,7 @@ void local_flush_tlb_one(unsigned long page) | |||
208 | write_c0_entrylo1(0); | 219 | write_c0_entrylo1(0); |
209 | if (idx >= 0) { | 220 | if (idx >= 0) { |
210 | /* Make sure all entries differ. */ | 221 | /* Make sure all entries differ. */ |
211 | write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); | 222 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); |
212 | mtc0_tlbw_hazard(); | 223 | mtc0_tlbw_hazard(); |
213 | tlb_write_indexed(); | 224 | tlb_write_indexed(); |
214 | tlbw_use_hazard(); | 225 | tlbw_use_hazard(); |
@@ -227,6 +238,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
227 | { | 238 | { |
228 | unsigned long flags; | 239 | unsigned long flags; |
229 | pgd_t *pgdp; | 240 | pgd_t *pgdp; |
241 | pud_t *pudp; | ||
230 | pmd_t *pmdp; | 242 | pmd_t *pmdp; |
231 | pte_t *ptep; | 243 | pte_t *ptep; |
232 | int idx, pid; | 244 | int idx, pid; |
@@ -237,35 +249,34 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
237 | if (current->active_mm != vma->vm_mm) | 249 | if (current->active_mm != vma->vm_mm) |
238 | return; | 250 | return; |
239 | 251 | ||
240 | pid = read_c0_entryhi() & ASID_MASK; | ||
241 | |||
242 | local_irq_save(flags); | 252 | local_irq_save(flags); |
253 | |||
254 | pid = read_c0_entryhi() & ASID_MASK; | ||
243 | address &= (PAGE_MASK << 1); | 255 | address &= (PAGE_MASK << 1); |
244 | write_c0_entryhi(address | pid); | 256 | write_c0_entryhi(address | pid); |
245 | pgdp = pgd_offset(vma->vm_mm, address); | 257 | pgdp = pgd_offset(vma->vm_mm, address); |
246 | mtc0_tlbw_hazard(); | 258 | mtc0_tlbw_hazard(); |
247 | tlb_probe(); | 259 | tlb_probe(); |
248 | BARRIER; | 260 | BARRIER; |
249 | pmdp = pmd_offset(pgdp, address); | 261 | pudp = pud_offset(pgdp, address); |
262 | pmdp = pmd_offset(pudp, address); | ||
250 | idx = read_c0_index(); | 263 | idx = read_c0_index(); |
251 | ptep = pte_offset_map(pmdp, address); | 264 | ptep = pte_offset_map(pmdp, address); |
252 | 265 | ||
253 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 266 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) |
254 | write_c0_entrylo0(ptep->pte_high); | 267 | write_c0_entrylo0(ptep->pte_high); |
255 | ptep++; | 268 | ptep++; |
256 | write_c0_entrylo1(ptep->pte_high); | 269 | write_c0_entrylo1(ptep->pte_high); |
257 | #else | 270 | #else |
258 | write_c0_entrylo0(pte_val(*ptep++) >> 6); | 271 | write_c0_entrylo0(pte_val(*ptep++) >> 6); |
259 | write_c0_entrylo1(pte_val(*ptep) >> 6); | 272 | write_c0_entrylo1(pte_val(*ptep) >> 6); |
260 | #endif | 273 | #endif |
261 | write_c0_entryhi(address | pid); | ||
262 | mtc0_tlbw_hazard(); | 274 | mtc0_tlbw_hazard(); |
263 | if (idx < 0) | 275 | if (idx < 0) |
264 | tlb_write_random(); | 276 | tlb_write_random(); |
265 | else | 277 | else |
266 | tlb_write_indexed(); | 278 | tlb_write_indexed(); |
267 | tlbw_use_hazard(); | 279 | tlbw_use_hazard(); |
268 | write_c0_entryhi(pid); | ||
269 | local_irq_restore(flags); | 280 | local_irq_restore(flags); |
270 | } | 281 | } |
271 | 282 | ||
@@ -357,7 +368,8 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
357 | old_pagemask = read_c0_pagemask(); | 368 | old_pagemask = read_c0_pagemask(); |
358 | wired = read_c0_wired(); | 369 | wired = read_c0_wired(); |
359 | if (--temp_tlb_entry < wired) { | 370 | if (--temp_tlb_entry < wired) { |
360 | printk(KERN_WARNING "No TLB space left for add_temporary_entry\n"); | 371 | printk(KERN_WARNING |
372 | "No TLB space left for add_temporary_entry\n"); | ||
361 | ret = -ENOSPC; | 373 | ret = -ENOSPC; |
362 | goto out; | 374 | goto out; |
363 | } | 375 | } |
@@ -388,7 +400,7 @@ static void __init probe_tlb(unsigned long config) | |||
388 | * is not supported, we assume R4k style. Cpu probing already figured | 400 | * is not supported, we assume R4k style. Cpu probing already figured |
389 | * out the number of tlb entries. | 401 | * out the number of tlb entries. |
390 | */ | 402 | */ |
391 | if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) | 403 | if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) |
392 | return; | 404 | return; |
393 | 405 | ||
394 | reg = read_c0_config1(); | 406 | reg = read_c0_config1(); |
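Worked example of the new UNIQUE_ENTRYHI() macro, assuming a 32-bit kernel with 4KB pages (so PAGE_SHIFT is 12); the slot number is illustrative:

	/* UNIQUE_ENTRYHI(5) expands to CKSEG0 + (5 << 13)
	 *                 = 0x80000000 + 0xa000 = 0x8000a000
	 * The shift is PAGE_SHIFT + 1 because one EntryHi names an
	 * even/odd page pair (EntryLo0/EntryLo1); spacing slots a pair
	 * apart keeps every VPN2 unique, which is exactly what avoids
	 * the MIPS32 machine check on duplicate TLB entries.
	 */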
diff --git a/arch/mips/mm/tlb-sb1.c b/arch/mips/mm/tlb-sb1.c deleted file mode 100644 index 6256cafcf3a2..000000000000 --- a/arch/mips/mm/tlb-sb1.c +++ /dev/null | |||
@@ -1,376 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) | ||
3 | * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org) | ||
4 | * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | #include <linux/init.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | #include <asm/bootinfo.h> | ||
23 | #include <asm/cpu.h> | ||
24 | |||
25 | extern void build_tlb_refill_handler(void); | ||
26 | |||
27 | #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | ||
28 | |||
29 | /* Dump the current entry* and pagemask registers */ | ||
30 | static inline void dump_cur_tlb_regs(void) | ||
31 | { | ||
32 | unsigned int entryhihi, entryhilo, entrylo0hi, entrylo0lo, entrylo1hi; | ||
33 | unsigned int entrylo1lo, pagemask; | ||
34 | |||
35 | __asm__ __volatile__ ( | ||
36 | ".set push \n" | ||
37 | ".set noreorder \n" | ||
38 | ".set mips64 \n" | ||
39 | ".set noat \n" | ||
40 | " tlbr \n" | ||
41 | " dmfc0 $1, $10 \n" | ||
42 | " dsrl32 %0, $1, 0 \n" | ||
43 | " sll %1, $1, 0 \n" | ||
44 | " dmfc0 $1, $2 \n" | ||
45 | " dsrl32 %2, $1, 0 \n" | ||
46 | " sll %3, $1, 0 \n" | ||
47 | " dmfc0 $1, $3 \n" | ||
48 | " dsrl32 %4, $1, 0 \n" | ||
49 | " sll %5, $1, 0 \n" | ||
50 | " mfc0 %6, $5 \n" | ||
51 | ".set pop \n" | ||
52 | : "=r" (entryhihi), "=r" (entryhilo), | ||
53 | "=r" (entrylo0hi), "=r" (entrylo0lo), | ||
54 | "=r" (entrylo1hi), "=r" (entrylo1lo), | ||
55 | "=r" (pagemask)); | ||
56 | |||
57 | printk("%08X%08X %08X%08X %08X%08X %08X", | ||
58 | entryhihi, entryhilo, | ||
59 | entrylo0hi, entrylo0lo, | ||
60 | entrylo1hi, entrylo1lo, | ||
61 | pagemask); | ||
62 | } | ||
63 | |||
64 | void sb1_dump_tlb(void) | ||
65 | { | ||
66 | unsigned long old_ctx; | ||
67 | unsigned long flags; | ||
68 | int entry; | ||
69 | local_irq_save(flags); | ||
70 | old_ctx = read_c0_entryhi(); | ||
71 | printk("Current TLB registers state:\n" | ||
72 | " EntryHi EntryLo0 EntryLo1 PageMask Index\n" | ||
73 | "--------------------------------------------------------------------\n"); | ||
74 | dump_cur_tlb_regs(); | ||
75 | printk(" %08X\n", read_c0_index()); | ||
76 | printk("\n\nFull TLB Dump:\n" | ||
77 | "Idx EntryHi EntryLo0 EntryLo1 PageMask\n" | ||
78 | "--------------------------------------------------------------\n"); | ||
79 | for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { | ||
80 | write_c0_index(entry); | ||
81 | printk("\n%02i ", entry); | ||
82 | dump_cur_tlb_regs(); | ||
83 | } | ||
84 | printk("\n"); | ||
85 | write_c0_entryhi(old_ctx); | ||
86 | local_irq_restore(flags); | ||
87 | } | ||
88 | |||
89 | void local_flush_tlb_all(void) | ||
90 | { | ||
91 | unsigned long flags; | ||
92 | unsigned long old_ctx; | ||
93 | int entry; | ||
94 | |||
95 | local_irq_save(flags); | ||
96 | /* Save old context and create impossible VPN2 value */ | ||
97 | old_ctx = read_c0_entryhi() & ASID_MASK; | ||
98 | write_c0_entrylo0(0); | ||
99 | write_c0_entrylo1(0); | ||
100 | |||
101 | entry = read_c0_wired(); | ||
102 | while (entry < current_cpu_data.tlbsize) { | ||
103 | write_c0_entryhi(UNIQUE_ENTRYHI(entry)); | ||
104 | write_c0_index(entry); | ||
105 | tlb_write_indexed(); | ||
106 | entry++; | ||
107 | } | ||
108 | write_c0_entryhi(old_ctx); | ||
109 | local_irq_restore(flags); | ||
110 | } | ||
111 | |||
112 | |||
113 | /* | ||
114 | * Use a bogus region of memory (starting at 0) to sanitize the TLB's. | ||
115 | * Use increments of the maximum page size (16MB), and check for duplicate | ||
116 | * entries before doing a given write. Then, when we're safe from collisions | ||
117 | * with the firmware, go back and give all the entries invalid addresses with | ||
118 | * the normal flush routine. Wired entries will be killed as well! | ||
119 | */ | ||
120 | static void __init sb1_sanitize_tlb(void) | ||
121 | { | ||
122 | int entry; | ||
123 | long addr = 0; | ||
124 | |||
125 | long inc = 1<<24; /* 16MB */ | ||
126 | /* Save old context and create impossible VPN2 value */ | ||
127 | write_c0_entrylo0(0); | ||
128 | write_c0_entrylo1(0); | ||
129 | for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { | ||
130 | do { | ||
131 | addr += inc; | ||
132 | write_c0_entryhi(addr); | ||
133 | tlb_probe(); | ||
134 | } while ((int)(read_c0_index()) >= 0); | ||
135 | write_c0_index(entry); | ||
136 | tlb_write_indexed(); | ||
137 | } | ||
138 | /* Now that we know we're safe from collisions, we can safely flush | ||
139 | the TLB with the "normal" routine. */ | ||
140 | local_flush_tlb_all(); | ||
141 | } | ||
142 | |||
143 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
144 | unsigned long end) | ||
145 | { | ||
146 | struct mm_struct *mm = vma->vm_mm; | ||
147 | unsigned long flags; | ||
148 | int cpu; | ||
149 | |||
150 | local_irq_save(flags); | ||
151 | cpu = smp_processor_id(); | ||
152 | if (cpu_context(cpu, mm) != 0) { | ||
153 | int size; | ||
154 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
155 | size = (size + 1) >> 1; | ||
156 | if (size <= (current_cpu_data.tlbsize/2)) { | ||
157 | int oldpid = read_c0_entryhi() & ASID_MASK; | ||
158 | int newpid = cpu_asid(cpu, mm); | ||
159 | |||
160 | start &= (PAGE_MASK << 1); | ||
161 | end += ((PAGE_SIZE << 1) - 1); | ||
162 | end &= (PAGE_MASK << 1); | ||
163 | while (start < end) { | ||
164 | int idx; | ||
165 | |||
166 | write_c0_entryhi(start | newpid); | ||
167 | start += (PAGE_SIZE << 1); | ||
168 | tlb_probe(); | ||
169 | idx = read_c0_index(); | ||
170 | write_c0_entrylo0(0); | ||
171 | write_c0_entrylo1(0); | ||
172 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); | ||
173 | if (idx < 0) | ||
174 | continue; | ||
175 | tlb_write_indexed(); | ||
176 | } | ||
177 | write_c0_entryhi(oldpid); | ||
178 | } else { | ||
179 | drop_mmu_context(mm, cpu); | ||
180 | } | ||
181 | } | ||
182 | local_irq_restore(flags); | ||
183 | } | ||
184 | |||
185 | void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
186 | { | ||
187 | unsigned long flags; | ||
188 | int size; | ||
189 | |||
190 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
191 | size = (size + 1) >> 1; | ||
192 | |||
193 | local_irq_save(flags); | ||
194 | if (size <= (current_cpu_data.tlbsize/2)) { | ||
195 | int pid = read_c0_entryhi(); | ||
196 | |||
197 | start &= (PAGE_MASK << 1); | ||
198 | end += ((PAGE_SIZE << 1) - 1); | ||
199 | end &= (PAGE_MASK << 1); | ||
200 | |||
201 | while (start < end) { | ||
202 | int idx; | ||
203 | |||
204 | write_c0_entryhi(start); | ||
205 | start += (PAGE_SIZE << 1); | ||
206 | tlb_probe(); | ||
207 | idx = read_c0_index(); | ||
208 | write_c0_entrylo0(0); | ||
209 | write_c0_entrylo1(0); | ||
210 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); | ||
211 | if (idx < 0) | ||
212 | continue; | ||
213 | tlb_write_indexed(); | ||
214 | } | ||
215 | write_c0_entryhi(pid); | ||
216 | } else { | ||
217 | local_flush_tlb_all(); | ||
218 | } | ||
219 | local_irq_restore(flags); | ||
220 | } | ||
221 | |||
222 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
223 | { | ||
224 | unsigned long flags; | ||
225 | int cpu = smp_processor_id(); | ||
226 | |||
227 | local_irq_save(flags); | ||
228 | if (cpu_context(cpu, vma->vm_mm) != 0) { | ||
229 | int oldpid, newpid, idx; | ||
230 | newpid = cpu_asid(cpu, vma->vm_mm); | ||
231 | page &= (PAGE_MASK << 1); | ||
232 | oldpid = read_c0_entryhi() & ASID_MASK; | ||
233 | write_c0_entryhi(page | newpid); | ||
234 | tlb_probe(); | ||
235 | idx = read_c0_index(); | ||
236 | write_c0_entrylo0(0); | ||
237 | write_c0_entrylo1(0); | ||
238 | if (idx < 0) | ||
239 | goto finish; | ||
240 | /* Make sure all entries differ. */ | ||
241 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); | ||
242 | tlb_write_indexed(); | ||
243 | finish: | ||
244 | write_c0_entryhi(oldpid); | ||
245 | } | ||
246 | local_irq_restore(flags); | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Remove one kernel space TLB entry. This entry is assumed to be marked | ||
251 | * global so we don't do the ASID thing. | ||
252 | */ | ||
253 | void local_flush_tlb_one(unsigned long page) | ||
254 | { | ||
255 | unsigned long flags; | ||
256 | int oldpid, idx; | ||
257 | |||
258 | page &= (PAGE_MASK << 1); | ||
259 | oldpid = read_c0_entryhi() & ASID_MASK; | ||
260 | |||
261 | local_irq_save(flags); | ||
262 | write_c0_entryhi(page); | ||
263 | tlb_probe(); | ||
264 | idx = read_c0_index(); | ||
265 | if (idx >= 0) { | ||
266 | /* Make sure all entries differ. */ | ||
267 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); | ||
268 | write_c0_entrylo0(0); | ||
269 | write_c0_entrylo1(0); | ||
270 | tlb_write_indexed(); | ||
271 | } | ||
272 | |||
273 | write_c0_entryhi(oldpid); | ||
274 | local_irq_restore(flags); | ||
275 | } | ||
276 | |||
277 | /* All entries common to a mm share an asid. To effectively flush | ||
278 | these entries, we just bump the asid. */ | ||
279 | void local_flush_tlb_mm(struct mm_struct *mm) | ||
280 | { | ||
281 | int cpu; | ||
282 | |||
283 | preempt_disable(); | ||
284 | |||
285 | cpu = smp_processor_id(); | ||
286 | |||
287 | if (cpu_context(cpu, mm) != 0) { | ||
288 | drop_mmu_context(mm, cpu); | ||
289 | } | ||
290 | |||
291 | preempt_enable(); | ||
292 | } | ||
293 | |||
294 | /* Stolen from mips32 routines */ | ||
295 | |||
296 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | ||
297 | { | ||
298 | unsigned long flags; | ||
299 | pgd_t *pgdp; | ||
300 | pmd_t *pmdp; | ||
301 | pte_t *ptep; | ||
302 | int idx, pid; | ||
303 | |||
304 | /* | ||
305 | * Handle debugger faulting in for debugee. | ||
306 | */ | ||
307 | if (current->active_mm != vma->vm_mm) | ||
308 | return; | ||
309 | |||
310 | local_irq_save(flags); | ||
311 | |||
312 | pid = read_c0_entryhi() & ASID_MASK; | ||
313 | address &= (PAGE_MASK << 1); | ||
314 | write_c0_entryhi(address | (pid)); | ||
315 | pgdp = pgd_offset(vma->vm_mm, address); | ||
316 | tlb_probe(); | ||
317 | pmdp = pmd_offset(pgdp, address); | ||
318 | idx = read_c0_index(); | ||
319 | ptep = pte_offset_map(pmdp, address); | ||
320 | write_c0_entrylo0(pte_val(*ptep++) >> 6); | ||
321 | write_c0_entrylo1(pte_val(*ptep) >> 6); | ||
322 | if (idx < 0) { | ||
323 | tlb_write_random(); | ||
324 | } else { | ||
325 | tlb_write_indexed(); | ||
326 | } | ||
327 | local_irq_restore(flags); | ||
328 | } | ||
329 | |||
330 | void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | ||
331 | unsigned long entryhi, unsigned long pagemask) | ||
332 | { | ||
333 | unsigned long flags; | ||
334 | unsigned long wired; | ||
335 | unsigned long old_pagemask; | ||
336 | unsigned long old_ctx; | ||
337 | |||
338 | local_irq_save(flags); | ||
339 | old_ctx = read_c0_entryhi() & 0xff; | ||
340 | old_pagemask = read_c0_pagemask(); | ||
341 | wired = read_c0_wired(); | ||
342 | write_c0_wired(wired + 1); | ||
343 | write_c0_index(wired); | ||
344 | |||
345 | write_c0_pagemask(pagemask); | ||
346 | write_c0_entryhi(entryhi); | ||
347 | write_c0_entrylo0(entrylo0); | ||
348 | write_c0_entrylo1(entrylo1); | ||
349 | tlb_write_indexed(); | ||
350 | |||
351 | write_c0_entryhi(old_ctx); | ||
352 | write_c0_pagemask(old_pagemask); | ||
353 | |||
354 | local_flush_tlb_all(); | ||
355 | local_irq_restore(flags); | ||
356 | } | ||
357 | |||
358 | /* | ||
359 | * This is called from loadmmu.c. We have to set up all the | ||
360 | * memory management function pointers, as well as initialize | ||
361 | * the caches and tlbs | ||
362 | */ | ||
363 | void tlb_init(void) | ||
364 | { | ||
365 | write_c0_pagemask(PM_DEFAULT_MASK); | ||
366 | write_c0_wired(0); | ||
367 | |||
368 | /* | ||
369 | * We don't know what state the firmware left the TLB's in, so this is | ||
370 | * the ultra-conservative way to flush the TLB's and avoid machine | ||
371 | * check exceptions due to duplicate TLB entries | ||
372 | */ | ||
373 | sb1_sanitize_tlb(); | ||
374 | |||
375 | build_tlb_refill_handler(); | ||
376 | } | ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 6569be3983c7..0f9485806bac 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Synthesize TLB refill handlers at runtime. | 6 | * Synthesize TLB refill handlers at runtime. |
7 | * | 7 | * |
8 | * Copyright (C) 2004,2005 by Thiemo Seufer | 8 | * Copyright (C) 2004,2005 by Thiemo Seufer |
9 | * Copyright (C) 2005 Maciej W. Rozycki | ||
9 | */ | 10 | */ |
10 | 11 | ||
11 | #include <stdarg.h> | 12 | #include <stdarg.h> |
@@ -91,7 +92,7 @@ enum opcode { | |||
91 | insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, | 92 | insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, |
92 | insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, | 93 | insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, |
93 | insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, | 94 | insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, |
94 | insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, | 95 | insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, |
95 | insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, | 96 | insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, |
96 | insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, | 97 | insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, |
97 | insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, | 98 | insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, |
@@ -134,7 +135,6 @@ static __initdata struct insn insn_table[] = { | |||
134 | { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, | 135 | { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, |
135 | { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, | 136 | { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, |
136 | { insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE }, | 137 | { insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE }, |
137 | { insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE }, | ||
138 | { insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD }, | 138 | { insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD }, |
139 | { insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 }, | 139 | { insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 }, |
140 | { insn_j, M(j_op,0,0,0,0,0), JIMM }, | 140 | { insn_j, M(j_op,0,0,0,0,0), JIMM }, |
@@ -366,7 +366,6 @@ I_u2u1u3(_dsll); | |||
366 | I_u2u1u3(_dsll32); | 366 | I_u2u1u3(_dsll32); |
367 | I_u2u1u3(_dsra); | 367 | I_u2u1u3(_dsra); |
368 | I_u2u1u3(_dsrl); | 368 | I_u2u1u3(_dsrl); |
369 | I_u2u1u3(_dsrl32); | ||
370 | I_u3u1u2(_dsubu); | 369 | I_u3u1u2(_dsubu); |
371 | I_0(_eret); | 370 | I_0(_eret); |
372 | I_u1(_j); | 371 | I_u1(_j); |
@@ -412,7 +411,6 @@ enum label_id { | |||
412 | label_nopage_tlbm, | 411 | label_nopage_tlbm, |
413 | label_smp_pgtable_change, | 412 | label_smp_pgtable_change, |
414 | label_r3000_write_probe_fail, | 413 | label_r3000_write_probe_fail, |
415 | label_r3000_write_probe_ok | ||
416 | }; | 414 | }; |
417 | 415 | ||
418 | struct label { | 416 | struct label { |
@@ -445,7 +443,6 @@ L_LA(_nopage_tlbs) | |||
445 | L_LA(_nopage_tlbm) | 443 | L_LA(_nopage_tlbm) |
446 | L_LA(_smp_pgtable_change) | 444 | L_LA(_smp_pgtable_change) |
447 | L_LA(_r3000_write_probe_fail) | 445 | L_LA(_r3000_write_probe_fail) |
448 | L_LA(_r3000_write_probe_ok) | ||
449 | 446 | ||
450 | /* convenience macros for instructions */ | 447 | /* convenience macros for instructions */ |
451 | #ifdef CONFIG_64BIT | 448 | #ifdef CONFIG_64BIT |
@@ -490,7 +487,7 @@ L_LA(_r3000_write_probe_ok) | |||
490 | static __init int __attribute__((unused)) in_compat_space_p(long addr) | 487 | static __init int __attribute__((unused)) in_compat_space_p(long addr) |
491 | { | 488 | { |
492 | /* Is this address in 32bit compat space? */ | 489 | /* Is this address in 32bit compat space? */ |
493 | return (((addr) & 0xffffffff00000000) == 0xffffffff00000000); | 490 | return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); |
494 | } | 491 | } |
495 | 492 | ||
496 | static __init int __attribute__((unused)) rel_highest(long val) | 493 | static __init int __attribute__((unused)) rel_highest(long val) |
@@ -734,7 +731,7 @@ static void __init build_r3000_tlb_refill_handler(void) | |||
734 | if (p > tlb_handler + 32) | 731 | if (p > tlb_handler + 32) |
735 | panic("TLB refill handler space exceeded"); | 732 | panic("TLB refill handler space exceeded"); |
736 | 733 | ||
737 | printk("Synthesized TLB handler (%u instructions).\n", | 734 | printk("Synthesized TLB refill handler (%u instructions).\n", |
738 | (unsigned int)(p - tlb_handler)); | 735 | (unsigned int)(p - tlb_handler)); |
739 | #ifdef DEBUG_TLB | 736 | #ifdef DEBUG_TLB |
740 | { | 737 | { |
@@ -746,7 +743,6 @@ static void __init build_r3000_tlb_refill_handler(void) | |||
746 | #endif | 743 | #endif |
747 | 744 | ||
748 | memcpy((void *)CAC_BASE, tlb_handler, 0x80); | 745 | memcpy((void *)CAC_BASE, tlb_handler, 0x80); |
749 | flush_icache_range(CAC_BASE, CAC_BASE + 0x80); | ||
750 | } | 746 | } |
751 | 747 | ||
752 | /* | 748 | /* |
@@ -783,6 +779,8 @@ static __initdata u32 final_handler[64]; | |||
783 | static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p) | 779 | static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p) |
784 | { | 780 | { |
785 | switch (current_cpu_data.cputype) { | 781 | switch (current_cpu_data.cputype) { |
782 | /* Found by experiment: R4600 v2.0 needs this, too. */ | ||
783 | case CPU_R4600: | ||
786 | case CPU_R5000: | 784 | case CPU_R5000: |
787 | case CPU_R5000A: | 785 | case CPU_R5000A: |
788 | case CPU_NEVADA: | 786 | case CPU_NEVADA: |
@@ -834,12 +832,20 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, | |||
834 | case CPU_R4700: | 832 | case CPU_R4700: |
835 | case CPU_R5000: | 833 | case CPU_R5000: |
836 | case CPU_R5000A: | 834 | case CPU_R5000A: |
835 | i_nop(p); | ||
836 | tlbw(p); | ||
837 | i_nop(p); | ||
838 | break; | ||
839 | |||
840 | case CPU_R4300: | ||
837 | case CPU_5KC: | 841 | case CPU_5KC: |
838 | case CPU_TX49XX: | 842 | case CPU_TX49XX: |
839 | case CPU_AU1000: | 843 | case CPU_AU1000: |
840 | case CPU_AU1100: | 844 | case CPU_AU1100: |
841 | case CPU_AU1500: | 845 | case CPU_AU1500: |
842 | case CPU_AU1550: | 846 | case CPU_AU1550: |
847 | case CPU_AU1200: | ||
848 | case CPU_PR4450: | ||
843 | i_nop(p); | 849 | i_nop(p); |
844 | tlbw(p); | 850 | tlbw(p); |
845 | break; | 851 | break; |
@@ -848,6 +854,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, | |||
848 | case CPU_R12000: | 854 | case CPU_R12000: |
849 | case CPU_4KC: | 855 | case CPU_4KC: |
850 | case CPU_SB1: | 856 | case CPU_SB1: |
857 | case CPU_SB1A: | ||
851 | case CPU_4KSC: | 858 | case CPU_4KSC: |
852 | case CPU_20KC: | 859 | case CPU_20KC: |
853 | case CPU_25KF: | 860 | case CPU_25KF: |
@@ -875,6 +882,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, | |||
875 | 882 | ||
876 | case CPU_4KEC: | 883 | case CPU_4KEC: |
877 | case CPU_24K: | 884 | case CPU_24K: |
885 | case CPU_34K: | ||
878 | i_ehb(p); | 886 | i_ehb(p); |
879 | tlbw(p); | 887 | tlbw(p); |
880 | break; | 888 | break; |
@@ -911,6 +919,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, | |||
911 | 919 | ||
912 | case CPU_VR4131: | 920 | case CPU_VR4131: |
913 | case CPU_VR4133: | 921 | case CPU_VR4133: |
922 | case CPU_R5432: | ||
914 | i_nop(p); | 923 | i_nop(p); |
915 | i_nop(p); | 924 | i_nop(p); |
916 | tlbw(p); | 925 | tlbw(p); |
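The cases in build_tlb_write_entry() differ only in how many hazard-avoidance cycles they pad around the TLB write. For the VR4131/VR4133 grouping that CPU_R5432 now joins, the emitted sequence is two no-ops before the write (the calls are as in the patch; the comments are added interpretation):

	i_nop(p);	/* first hazard slot */
	i_nop(p);	/* second hazard slot */
	tlbw(p);	/* tlbwi, or tlbwr on the refill fast path */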
@@ -942,34 +951,29 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r, | |||
942 | /* No i_nop needed here, since the next insn doesn't touch TMP. */ | 951 | /* No i_nop needed here, since the next insn doesn't touch TMP. */ |
943 | 952 | ||
944 | #ifdef CONFIG_SMP | 953 | #ifdef CONFIG_SMP |
954 | # ifdef CONFIG_BUILD_ELF64 | ||
945 | /* | 955 | /* |
946 | * 64 bit SMP has the lower part of &pgd_current[smp_processor_id()] | 956 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 |
947 | * stored in CONTEXT. | 957 | * stored in CONTEXT. |
948 | */ | 958 | */ |
949 | if (in_compat_space_p(pgdc)) { | 959 | i_dmfc0(p, ptr, C0_CONTEXT); |
950 | i_dmfc0(p, ptr, C0_CONTEXT); | 960 | i_dsrl(p, ptr, ptr, 23); |
951 | i_dsra(p, ptr, ptr, 23); | 961 | i_LA_mostly(p, tmp, pgdc); |
952 | i_ld(p, ptr, 0, ptr); | 962 | i_daddu(p, ptr, ptr, tmp); |
953 | } else { | 963 | i_dmfc0(p, tmp, C0_BADVADDR); |
954 | #ifdef CONFIG_BUILD_ELF64 | 964 | i_ld(p, ptr, rel_lo(pgdc), ptr); |
955 | i_dmfc0(p, ptr, C0_CONTEXT); | 965 | # else |
956 | i_dsrl(p, ptr, ptr, 23); | 966 | /* |
957 | i_dsll(p, ptr, ptr, 3); | 967 | * 64 bit SMP running in compat space has the lower part of |
958 | i_LA_mostly(p, tmp, pgdc); | 968 | * &pgd_current[smp_processor_id()] stored in CONTEXT. |
959 | i_daddu(p, ptr, ptr, tmp); | 969 | */ |
960 | i_dmfc0(p, tmp, C0_BADVADDR); | 970 | if (!in_compat_space_p(pgdc)) |
961 | i_ld(p, ptr, rel_lo(pgdc), ptr); | 971 | panic("Invalid page directory address!"); |
962 | #else | 972 | |
963 | i_dmfc0(p, ptr, C0_CONTEXT); | 973 | i_dmfc0(p, ptr, C0_CONTEXT); |
964 | i_lui(p, tmp, rel_highest(pgdc)); | 974 | i_dsra(p, ptr, ptr, 23); |
965 | i_dsll(p, ptr, ptr, 9); | 975 | i_ld(p, ptr, 0, ptr); |
966 | i_daddiu(p, tmp, tmp, rel_higher(pgdc)); | 976 | # endif |
967 | i_dsrl32(p, ptr, ptr, 0); | ||
968 | i_and(p, ptr, ptr, tmp); | ||
969 | i_dmfc0(p, tmp, C0_BADVADDR); | ||
970 | i_ld(p, ptr, 0, ptr); | ||
971 | #endif | ||
972 | } | ||
973 | #else | 977 | #else |
974 | i_LA_mostly(p, ptr, pgdc); | 978 | i_LA_mostly(p, ptr, pgdc); |
975 | i_ld(p, ptr, rel_lo(pgdc), ptr); | 979 | i_ld(p, ptr, rel_lo(pgdc), ptr); |
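
[Note] The build_get_pmde64() rewrite turns the old run-time in_compat_space_p() test into a compile-time split. A C-level sketch of what each emitted sequence computes (the helper name is hypothetical, and the real code interleaves the BadVAddr read and the %lo/%higher relocation arithmetic that the sketch omits):

	static pgd_t *sketch_get_pgd_64bit_smp(void)
	{
	#ifdef CONFIG_BUILD_ELF64
		/* Kernel running in XKPHYS: CONTEXT is assumed to hold the
		 * cpu's table offset so that >> 23 yields cpu * 8. */
		unsigned long off = read_c0_context() >> 23;
		return *(pgd_t **)((char *)pgd_current + off);
	#else
		/* Compat-space kernel: CONTEXT carries the low part of
		 * &pgd_current[smp_processor_id()] directly; the arithmetic
		 * shift (dsra) sign-extends it back into a valid pointer. */
		return *(pgd_t **)((long)read_c0_context() >> 23);
	#endif
	}

Because the compat branch can no longer fall back at run time, the new panic() fires while the handler is being built if pgd_current turns out to lie outside compat space.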
@@ -1026,7 +1030,6 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) | |||
1026 | i_mfc0(p, ptr, C0_CONTEXT); | 1030 | i_mfc0(p, ptr, C0_CONTEXT); |
1027 | i_LA_mostly(p, tmp, pgdc); | 1031 | i_LA_mostly(p, tmp, pgdc); |
1028 | i_srl(p, ptr, ptr, 23); | 1032 | i_srl(p, ptr, ptr, 23); |
1029 | i_sll(p, ptr, ptr, 2); | ||
1030 | i_addu(p, ptr, tmp, ptr); | 1033 | i_addu(p, ptr, tmp, ptr); |
1031 | #else | 1034 | #else |
1032 | i_LA_mostly(p, ptr, pgdc); | 1035 | i_LA_mostly(p, ptr, pgdc); |
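
[Note] build_get_pgde32() gets the matching 32-bit change: the srl-then-sll pair collapses into a single srl, which assumes CONTEXT now stores the cpu number pre-scaled (effectively cpu << 25) so that one shift by 23 yields the pgd_current[] byte offset directly. In C terms, roughly:

	/* Sketch: one shift now produces cpu * sizeof(pgd_t *) directly. */
	unsigned long off = read_c0_context() >> 23;
	pgd_t *pgd = *(pgd_t **)((char *)pgd_current + off);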
@@ -1245,13 +1248,19 @@ static void __init build_r4000_tlb_refill_handler(void) | |||
1245 | { | 1248 | { |
1246 | int i; | 1249 | int i; |
1247 | 1250 | ||
1248 | for (i = 0; i < 64; i++) | 1251 | f = final_handler; |
1249 | printk("%08x\n", final_handler[i]); | 1252 | #ifdef CONFIG_64BIT |
1253 | if (final_len > 32) | ||
1254 | final_len = 64; | ||
1255 | else | ||
1256 | f = final_handler + 32; | ||
1257 | #endif /* CONFIG_64BIT */ | ||
1258 | for (i = 0; i < final_len; i++) | ||
1259 | printk("%08x\n", f[i]); | ||
1250 | } | 1260 | } |
1251 | #endif | 1261 | #endif |
1252 | 1262 | ||
1253 | memcpy((void *)CAC_BASE, final_handler, 0x100); | 1263 | memcpy((void *)CAC_BASE, final_handler, 0x100); |
1254 | flush_icache_range(CAC_BASE, CAC_BASE + 0x100); | ||
1255 | } | 1264 | } |
1256 | 1265 | ||
1257 | /* | 1266 | /* |
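
[Note] At the tail of build_r4000_tlb_refill_handler() the debug dump stops printing a fixed 64 words. Annotated form of the selection the hunk performs (a sketch; it assumes, as the code implies, that a short 64-bit refill handler is assembled into the upper half of final_handler[]):

	u32 *f = final_handler;
	int i;

	#ifdef CONFIG_64BIT
	if (final_len > 32)
		final_len = 64;		/* long variant fills the whole buffer */
	else
		f = final_handler + 32;	/* short variant lives in the top half */
	#endif
	for (i = 0; i < final_len; i++)
		printk("%08x\n", f[i]);

The flush_icache_range() on the copied handler is dropped in the same hunk; the fastpath handlers below likewise have their flushes deferred into the new flush_tlb_handlers() at the end of the file.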
@@ -1277,37 +1286,41 @@ u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE]; | |||
1277 | u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE]; | 1286 | u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE]; |
1278 | 1287 | ||
1279 | static void __init | 1288 | static void __init |
1280 | iPTE_LW(u32 **p, struct label **l, unsigned int pte, int offset, | 1289 | iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr) |
1281 | unsigned int ptr) | ||
1282 | { | 1290 | { |
1283 | #ifdef CONFIG_SMP | 1291 | #ifdef CONFIG_SMP |
1284 | # ifdef CONFIG_64BIT_PHYS_ADDR | 1292 | # ifdef CONFIG_64BIT_PHYS_ADDR |
1285 | if (cpu_has_64bits) | 1293 | if (cpu_has_64bits) |
1286 | i_lld(p, pte, offset, ptr); | 1294 | i_lld(p, pte, 0, ptr); |
1287 | else | 1295 | else |
1288 | # endif | 1296 | # endif |
1289 | i_LL(p, pte, offset, ptr); | 1297 | i_LL(p, pte, 0, ptr); |
1290 | #else | 1298 | #else |
1291 | # ifdef CONFIG_64BIT_PHYS_ADDR | 1299 | # ifdef CONFIG_64BIT_PHYS_ADDR |
1292 | if (cpu_has_64bits) | 1300 | if (cpu_has_64bits) |
1293 | i_ld(p, pte, offset, ptr); | 1301 | i_ld(p, pte, 0, ptr); |
1294 | else | 1302 | else |
1295 | # endif | 1303 | # endif |
1296 | i_LW(p, pte, offset, ptr); | 1304 | i_LW(p, pte, 0, ptr); |
1297 | #endif | 1305 | #endif |
1298 | } | 1306 | } |
1299 | 1307 | ||
1300 | static void __init | 1308 | static void __init |
1301 | iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, int offset, | 1309 | iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr, |
1302 | unsigned int ptr) | 1310 | unsigned int mode) |
1303 | { | 1311 | { |
1312 | #ifdef CONFIG_64BIT_PHYS_ADDR | ||
1313 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); | ||
1314 | #endif | ||
1315 | |||
1316 | i_ori(p, pte, pte, mode); | ||
1304 | #ifdef CONFIG_SMP | 1317 | #ifdef CONFIG_SMP |
1305 | # ifdef CONFIG_64BIT_PHYS_ADDR | 1318 | # ifdef CONFIG_64BIT_PHYS_ADDR |
1306 | if (cpu_has_64bits) | 1319 | if (cpu_has_64bits) |
1307 | i_scd(p, pte, offset, ptr); | 1320 | i_scd(p, pte, 0, ptr); |
1308 | else | 1321 | else |
1309 | # endif | 1322 | # endif |
1310 | i_SC(p, pte, offset, ptr); | 1323 | i_SC(p, pte, 0, ptr); |
1311 | 1324 | ||
1312 | if (r10000_llsc_war()) | 1325 | if (r10000_llsc_war()) |
1313 | il_beqzl(p, r, pte, label_smp_pgtable_change); | 1326 | il_beqzl(p, r, pte, label_smp_pgtable_change); |
@@ -1318,7 +1331,7 @@ iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, int offset, | |||
1318 | if (!cpu_has_64bits) { | 1331 | if (!cpu_has_64bits) { |
1319 | /* no i_nop needed */ | 1332 | /* no i_nop needed */ |
1320 | i_ll(p, pte, sizeof(pte_t) / 2, ptr); | 1333 | i_ll(p, pte, sizeof(pte_t) / 2, ptr); |
1321 | i_ori(p, pte, pte, _PAGE_VALID); | 1334 | i_ori(p, pte, pte, hwmode); |
1322 | i_sc(p, pte, sizeof(pte_t) / 2, ptr); | 1335 | i_sc(p, pte, sizeof(pte_t) / 2, ptr); |
1323 | il_beqz(p, r, pte, label_smp_pgtable_change); | 1336 | il_beqz(p, r, pte, label_smp_pgtable_change); |
1324 | /* no i_nop needed */ | 1337 | /* no i_nop needed */ |
@@ -1331,15 +1344,15 @@ iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, int offset, | |||
1331 | #else | 1344 | #else |
1332 | # ifdef CONFIG_64BIT_PHYS_ADDR | 1345 | # ifdef CONFIG_64BIT_PHYS_ADDR |
1333 | if (cpu_has_64bits) | 1346 | if (cpu_has_64bits) |
1334 | i_sd(p, pte, offset, ptr); | 1347 | i_sd(p, pte, 0, ptr); |
1335 | else | 1348 | else |
1336 | # endif | 1349 | # endif |
1337 | i_SW(p, pte, offset, ptr); | 1350 | i_SW(p, pte, 0, ptr); |
1338 | 1351 | ||
1339 | # ifdef CONFIG_64BIT_PHYS_ADDR | 1352 | # ifdef CONFIG_64BIT_PHYS_ADDR |
1340 | if (!cpu_has_64bits) { | 1353 | if (!cpu_has_64bits) { |
1341 | i_lw(p, pte, sizeof(pte_t) / 2, ptr); | 1354 | i_lw(p, pte, sizeof(pte_t) / 2, ptr); |
1342 | i_ori(p, pte, pte, _PAGE_VALID); | 1355 | i_ori(p, pte, pte, hwmode); |
1343 | i_sw(p, pte, sizeof(pte_t) / 2, ptr); | 1356 | i_sw(p, pte, sizeof(pte_t) / 2, ptr); |
1344 | i_lw(p, pte, 0, ptr); | 1357 | i_lw(p, pte, 0, ptr); |
1345 | } | 1358 | } |
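
[Note] The iPTE_LW()/iPTE_SW() refactor drops the always-zero offset argument and moves the ori of the new PTE bits inside iPTE_SW(), so on SMP the or lands between the load-linked (emitted earlier by iPTE_LW()) and the store-conditional. On 32-bit kernels with 64-bit PTEs (CONFIG_64BIT_PHYS_ADDR without cpu_has_64bits) only the hardware bits, _PAGE_VALID | _PAGE_DIRTY, are replayed into the second PTE word as hwmode. A C-level sketch of the SMP sequence, with load_linked()/store_conditional() as hypothetical stand-ins for the emitted ll/sc (or lld/scd) pair:

	static void sketch_pte_update(unsigned long *ptep, unsigned int mode)
	{
		unsigned long pte;

		do {
			pte = load_linked(ptep);	 /* i_LL / i_lld */
			pte |= mode;			 /* i_ori        */
		} while (!store_conditional(ptep, pte)); /* i_SC / i_scd */
	}

The emitted code retries by branching back to label_smp_pgtable_change rather than looping locally, but the effect is the same atomic read-modify-write of the PTE.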
@@ -1359,7 +1372,7 @@ build_pte_present(u32 **p, struct label **l, struct reloc **r, | |||
1359 | i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | 1372 | i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); |
1360 | i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | 1373 | i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); |
1361 | il_bnez(p, r, pte, lid); | 1374 | il_bnez(p, r, pte, lid); |
1362 | iPTE_LW(p, l, pte, 0, ptr); | 1375 | iPTE_LW(p, l, pte, ptr); |
1363 | } | 1376 | } |
1364 | 1377 | ||
1365 | /* Make PTE valid, store result in PTR. */ | 1378 | /* Make PTE valid, store result in PTR. */ |
@@ -1367,8 +1380,9 @@ static void __init | |||
1367 | build_make_valid(u32 **p, struct reloc **r, unsigned int pte, | 1380 | build_make_valid(u32 **p, struct reloc **r, unsigned int pte, |
1368 | unsigned int ptr) | 1381 | unsigned int ptr) |
1369 | { | 1382 | { |
1370 | i_ori(p, pte, pte, _PAGE_VALID | _PAGE_ACCESSED); | 1383 | unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; |
1371 | iPTE_SW(p, r, pte, 0, ptr); | 1384 | |
1385 | iPTE_SW(p, r, pte, ptr, mode); | ||
1372 | } | 1386 | } |
1373 | 1387 | ||
1374 | /* | 1388 | /* |
@@ -1382,7 +1396,7 @@ build_pte_writable(u32 **p, struct label **l, struct reloc **r, | |||
1382 | i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); | 1396 | i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); |
1383 | i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); | 1397 | i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); |
1384 | il_bnez(p, r, pte, lid); | 1398 | il_bnez(p, r, pte, lid); |
1385 | iPTE_LW(p, l, pte, 0, ptr); | 1399 | iPTE_LW(p, l, pte, ptr); |
1386 | } | 1400 | } |
1387 | 1401 | ||
1388 | /* Make PTE writable, update software status bits as well, then store | 1402 | /* Make PTE writable, update software status bits as well, then store |
@@ -1392,9 +1406,10 @@ static void __init | |||
1392 | build_make_write(u32 **p, struct reloc **r, unsigned int pte, | 1406 | build_make_write(u32 **p, struct reloc **r, unsigned int pte, |
1393 | unsigned int ptr) | 1407 | unsigned int ptr) |
1394 | { | 1408 | { |
1395 | i_ori(p, pte, pte, | 1409 | unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID |
1396 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | 1410 | | _PAGE_DIRTY); |
1397 | iPTE_SW(p, r, pte, 0, ptr); | 1411 | |
1412 | iPTE_SW(p, r, pte, ptr, mode); | ||
1398 | } | 1413 | } |
1399 | 1414 | ||
1400 | /* | 1415 | /* |
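
[Note] With the mode parameter in place, build_make_valid() and build_make_write() reduce to naming the bit set they want (taken from the two hunks above):

	unsigned int valid_mode = _PAGE_VALID | _PAGE_ACCESSED;
	unsigned int write_mode = _PAGE_ACCESSED | _PAGE_MODIFIED |
				  _PAGE_VALID | _PAGE_DIRTY;

	iPTE_SW(p, r, pte, ptr, valid_mode);	/* build_make_valid() */
	iPTE_SW(p, r, pte, ptr, write_mode);	/* build_make_write() */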
@@ -1407,41 +1422,48 @@ build_pte_modifiable(u32 **p, struct label **l, struct reloc **r, | |||
1407 | { | 1422 | { |
1408 | i_andi(p, pte, pte, _PAGE_WRITE); | 1423 | i_andi(p, pte, pte, _PAGE_WRITE); |
1409 | il_beqz(p, r, pte, lid); | 1424 | il_beqz(p, r, pte, lid); |
1410 | iPTE_LW(p, l, pte, 0, ptr); | 1425 | iPTE_LW(p, l, pte, ptr); |
1411 | } | 1426 | } |
1412 | 1427 | ||
1413 | /* | 1428 | /* |
1414 | * R3000 style TLB load/store/modify handlers. | 1429 | * R3000 style TLB load/store/modify handlers. |
1415 | */ | 1430 | */ |
1416 | 1431 | ||
1417 | /* This places the pte in the page table at PTR into ENTRYLO0. */ | 1432 | /* |
1433 | * This places the pte into ENTRYLO0 and writes it with tlbwi. | ||
1434 | * Then it returns. | ||
1435 | */ | ||
1418 | static void __init | 1436 | static void __init |
1419 | build_r3000_pte_reload(u32 **p, unsigned int ptr) | 1437 | build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) |
1420 | { | 1438 | { |
1421 | i_lw(p, ptr, 0, ptr); | 1439 | i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ |
1422 | i_nop(p); /* load delay */ | 1440 | i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ |
1423 | i_mtc0(p, ptr, C0_ENTRYLO0); | 1441 | i_tlbwi(p); |
1424 | i_nop(p); /* cp0 delay */ | 1442 | i_jr(p, tmp); |
1443 | i_rfe(p); /* branch delay */ | ||
1425 | } | 1444 | } |
1426 | 1445 | ||
1427 | /* | 1446 | /* |
1428 | * The index register may have the probe fail bit set, | 1447 | * This places the pte into ENTRYLO0 and writes it with tlbwi |
1429 | * because we would trap on a kseg2 access, i.e. without refill. | 1448 | * or tlbwr as appropriate. This is because the index register
1449 | * may have the probe fail bit set as a result of a trap on a | ||
1450 | * kseg2 access, i.e. without refill. Then it returns. | ||
1430 | */ | 1451 | */ |
1431 | static void __init | 1452 | static void __init |
1432 | build_r3000_tlb_write(u32 **p, struct label **l, struct reloc **r, | 1453 | build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r, |
1433 | unsigned int tmp) | 1454 | unsigned int pte, unsigned int tmp) |
1434 | { | 1455 | { |
1435 | i_mfc0(p, tmp, C0_INDEX); | 1456 | i_mfc0(p, tmp, C0_INDEX); |
1436 | i_nop(p); /* cp0 delay */ | 1457 | i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ |
1437 | il_bltz(p, r, tmp, label_r3000_write_probe_fail); | 1458 | il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ |
1438 | i_nop(p); /* branch delay */ | 1459 | i_mfc0(p, tmp, C0_EPC); /* branch delay */ |
1439 | i_tlbwi(p); | 1460 | i_tlbwi(p); /* cp0 delay */ |
1440 | il_b(p, r, label_r3000_write_probe_ok); | 1461 | i_jr(p, tmp); |
1441 | i_nop(p); /* branch delay */ | 1462 | i_rfe(p); /* branch delay */ |
1442 | l_r3000_write_probe_fail(l, *p); | 1463 | l_r3000_write_probe_fail(l, *p); |
1443 | i_tlbwr(p); | 1464 | i_tlbwr(p); /* cp0 delay */ |
1444 | l_r3000_write_probe_ok(l, *p); | 1465 | i_jr(p, tmp); |
1466 | i_rfe(p); /* branch delay */ | ||
1445 | } | 1467 | } |
1446 | 1468 | ||
1447 | static void __init | 1469 | static void __init |
@@ -1461,17 +1483,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, | |||
1461 | i_andi(p, pte, pte, 0xffc); /* load delay */ | 1483 | i_andi(p, pte, pte, 0xffc); /* load delay */ |
1462 | i_addu(p, ptr, ptr, pte); | 1484 | i_addu(p, ptr, ptr, pte); |
1463 | i_lw(p, pte, 0, ptr); | 1485 | i_lw(p, pte, 0, ptr); |
1464 | i_nop(p); /* load delay */ | 1486 | i_tlbp(p); /* load delay */ |
1465 | i_tlbp(p); | ||
1466 | } | ||
1467 | |||
1468 | static void __init | ||
1469 | build_r3000_tlbchange_handler_tail(u32 **p, unsigned int tmp) | ||
1470 | { | ||
1471 | i_mfc0(p, tmp, C0_EPC); | ||
1472 | i_nop(p); /* cp0 delay */ | ||
1473 | i_jr(p, tmp); | ||
1474 | i_rfe(p); /* branch delay */ | ||
1475 | } | 1487 | } |
1476 | 1488 | ||
1477 | static void __init build_r3000_tlb_load_handler(void) | 1489 | static void __init build_r3000_tlb_load_handler(void) |
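
[Note] The R3000 rework is all about delay slots: instead of a reload helper, a write helper and a shared tail padded with no-ops, each path now carries useful work in every cp0 and branch delay slot and returns directly. Reconstructed from the builder calls above, build_r3000_tlb_reload_write() emits roughly this stream (register and label names illustrative):

	/*
	 *	mfc0	k1, c0_index
	 *	mtc0	k0, c0_entrylo0	 # fills the c0_index read delay
	 *	bltz	k1, probe_fail	 # fills the c0_entrylo0 write delay
	 *	 mfc0	k1, c0_epc	 # branch delay: return address, both paths
	 *	tlbwi			 # probe hit: indexed write
	 *	jr	k1
	 *	 rfe			 # branch delay: leave exception mode
	 * probe_fail:
	 *	tlbwr			 # probe missed: write a random slot
	 *	jr	k1
	 *	 rfe
	 */

The separate build_r3000_tlbchange_handler_tail() becomes redundant and is deleted, and the trailing i_nop() in the handler head turns into the i_tlbp() itself, which now occupies the load delay slot.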
@@ -1486,10 +1498,9 @@ static void __init build_r3000_tlb_load_handler(void) | |||
1486 | 1498 | ||
1487 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1499 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1488 | build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); | 1500 | build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); |
1501 | i_nop(&p); /* load delay */ | ||
1489 | build_make_valid(&p, &r, K0, K1); | 1502 | build_make_valid(&p, &r, K0, K1); |
1490 | build_r3000_pte_reload(&p, K1); | 1503 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1491 | build_r3000_tlb_write(&p, &l, &r, K0); | ||
1492 | build_r3000_tlbchange_handler_tail(&p, K0); | ||
1493 | 1504 | ||
1494 | l_nopage_tlbl(&l, p); | 1505 | l_nopage_tlbl(&l, p); |
1495 | i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | 1506 | i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); |
@@ -1506,13 +1517,10 @@ static void __init build_r3000_tlb_load_handler(void) | |||
1506 | { | 1517 | { |
1507 | int i; | 1518 | int i; |
1508 | 1519 | ||
1509 | for (i = 0; i < FASTPATH_SIZE; i++) | 1520 | for (i = 0; i < (p - handle_tlbl); i++) |
1510 | printk("%08x\n", handle_tlbl[i]); | 1521 | printk("%08x\n", handle_tlbl[i]); |
1511 | } | 1522 | } |
1512 | #endif | 1523 | #endif |
1513 | |||
1514 | flush_icache_range((unsigned long)handle_tlbl, | ||
1515 | (unsigned long)handle_tlbl + FASTPATH_SIZE * sizeof(u32)); | ||
1516 | } | 1524 | } |
1517 | 1525 | ||
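
[Note] The handler-build functions now add an explicit i_nop() for the load delay after the PTE test (the deleted reload helper no longer supplies one) and dump only the instructions actually emitted, bounding the loop by the assembler cursor instead of the buffer size; the same two fixes repeat verbatim in the store, modify and R4000 variants below. Sketch of the dump idiom:

	/* p is the micro-assembler cursor; handle_tlbl the output buffer. */
	for (i = 0; i < (p - handle_tlbl); i++)
		printk("%08x\n", handle_tlbl[i]);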
1518 | static void __init build_r3000_tlb_store_handler(void) | 1526 | static void __init build_r3000_tlb_store_handler(void) |
@@ -1527,10 +1535,9 @@ static void __init build_r3000_tlb_store_handler(void) | |||
1527 | 1535 | ||
1528 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1536 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1529 | build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); | 1537 | build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); |
1538 | i_nop(&p); /* load delay */ | ||
1530 | build_make_write(&p, &r, K0, K1); | 1539 | build_make_write(&p, &r, K0, K1); |
1531 | build_r3000_pte_reload(&p, K1); | 1540 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1532 | build_r3000_tlb_write(&p, &l, &r, K0); | ||
1533 | build_r3000_tlbchange_handler_tail(&p, K0); | ||
1534 | 1541 | ||
1535 | l_nopage_tlbs(&l, p); | 1542 | l_nopage_tlbs(&l, p); |
1536 | i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 1543 | i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
@@ -1547,13 +1554,10 @@ static void __init build_r3000_tlb_store_handler(void) | |||
1547 | { | 1554 | { |
1548 | int i; | 1555 | int i; |
1549 | 1556 | ||
1550 | for (i = 0; i < FASTPATH_SIZE; i++) | 1557 | for (i = 0; i < (p - handle_tlbs); i++) |
1551 | printk("%08x\n", handle_tlbs[i]); | 1558 | printk("%08x\n", handle_tlbs[i]); |
1552 | } | 1559 | } |
1553 | #endif | 1560 | #endif |
1554 | |||
1555 | flush_icache_range((unsigned long)handle_tlbs, | ||
1556 | (unsigned long)handle_tlbs + FASTPATH_SIZE * sizeof(u32)); | ||
1557 | } | 1561 | } |
1558 | 1562 | ||
1559 | static void __init build_r3000_tlb_modify_handler(void) | 1563 | static void __init build_r3000_tlb_modify_handler(void) |
@@ -1568,10 +1572,9 @@ static void __init build_r3000_tlb_modify_handler(void) | |||
1568 | 1572 | ||
1569 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1573 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1570 | build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); | 1574 | build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); |
1575 | i_nop(&p); /* load delay */ | ||
1571 | build_make_write(&p, &r, K0, K1); | 1576 | build_make_write(&p, &r, K0, K1); |
1572 | build_r3000_pte_reload(&p, K1); | 1577 | build_r3000_pte_reload_tlbwi(&p, K0, K1); |
1573 | i_tlbwi(&p); | ||
1574 | build_r3000_tlbchange_handler_tail(&p, K0); | ||
1575 | 1578 | ||
1576 | l_nopage_tlbm(&l, p); | 1579 | l_nopage_tlbm(&l, p); |
1577 | i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 1580 | i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); |
@@ -1588,13 +1591,10 @@ static void __init build_r3000_tlb_modify_handler(void) | |||
1588 | { | 1591 | { |
1589 | int i; | 1592 | int i; |
1590 | 1593 | ||
1591 | for (i = 0; i < FASTPATH_SIZE; i++) | 1594 | for (i = 0; i < (p - handle_tlbm); i++) |
1592 | printk("%08x\n", handle_tlbm[i]); | 1595 | printk("%08x\n", handle_tlbm[i]); |
1593 | } | 1596 | } |
1594 | #endif | 1597 | #endif |
1595 | |||
1596 | flush_icache_range((unsigned long)handle_tlbm, | ||
1597 | (unsigned long)handle_tlbm + FASTPATH_SIZE * sizeof(u32)); | ||
1598 | } | 1598 | } |
1599 | 1599 | ||
1600 | /* | 1600 | /* |
@@ -1620,7 +1620,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct label **l, | |||
1620 | #ifdef CONFIG_SMP | 1620 | #ifdef CONFIG_SMP |
1621 | l_smp_pgtable_change(l, *p); | 1621 | l_smp_pgtable_change(l, *p); |
1622 | # endif | 1622 | # endif |
1623 | iPTE_LW(p, l, pte, 0, ptr); /* get even pte */ | 1623 | iPTE_LW(p, l, pte, ptr); /* get even pte */ |
1624 | build_tlb_probe_entry(p); | 1624 | build_tlb_probe_entry(p); |
1625 | } | 1625 | } |
1626 | 1626 | ||
@@ -1680,13 +1680,10 @@ static void __init build_r4000_tlb_load_handler(void) | |||
1680 | { | 1680 | { |
1681 | int i; | 1681 | int i; |
1682 | 1682 | ||
1683 | for (i = 0; i < FASTPATH_SIZE; i++) | 1683 | for (i = 0; i < (p - handle_tlbl); i++) |
1684 | printk("%08x\n", handle_tlbl[i]); | 1684 | printk("%08x\n", handle_tlbl[i]); |
1685 | } | 1685 | } |
1686 | #endif | 1686 | #endif |
1687 | |||
1688 | flush_icache_range((unsigned long)handle_tlbl, | ||
1689 | (unsigned long)handle_tlbl + FASTPATH_SIZE * sizeof(u32)); | ||
1690 | } | 1687 | } |
1691 | 1688 | ||
1692 | static void __init build_r4000_tlb_store_handler(void) | 1689 | static void __init build_r4000_tlb_store_handler(void) |
@@ -1719,13 +1716,10 @@ static void __init build_r4000_tlb_store_handler(void) | |||
1719 | { | 1716 | { |
1720 | int i; | 1717 | int i; |
1721 | 1718 | ||
1722 | for (i = 0; i < FASTPATH_SIZE; i++) | 1719 | for (i = 0; i < (p - handle_tlbs); i++) |
1723 | printk("%08x\n", handle_tlbs[i]); | 1720 | printk("%08x\n", handle_tlbs[i]); |
1724 | } | 1721 | } |
1725 | #endif | 1722 | #endif |
1726 | |||
1727 | flush_icache_range((unsigned long)handle_tlbs, | ||
1728 | (unsigned long)handle_tlbs + FASTPATH_SIZE * sizeof(u32)); | ||
1729 | } | 1723 | } |
1730 | 1724 | ||
1731 | static void __init build_r4000_tlb_modify_handler(void) | 1725 | static void __init build_r4000_tlb_modify_handler(void) |
@@ -1759,13 +1753,10 @@ static void __init build_r4000_tlb_modify_handler(void) | |||
1759 | { | 1753 | { |
1760 | int i; | 1754 | int i; |
1761 | 1755 | ||
1762 | for (i = 0; i < FASTPATH_SIZE; i++) | 1756 | for (i = 0; i < (p - handle_tlbm); i++) |
1763 | printk("%08x\n", handle_tlbm[i]); | 1757 | printk("%08x\n", handle_tlbm[i]); |
1764 | } | 1758 | } |
1765 | #endif | 1759 | #endif |
1766 | |||
1767 | flush_icache_range((unsigned long)handle_tlbm, | ||
1768 | (unsigned long)handle_tlbm + FASTPATH_SIZE * sizeof(u32)); | ||
1769 | } | 1760 | } |
1770 | 1761 | ||
1771 | void __init build_tlb_refill_handler(void) | 1762 | void __init build_tlb_refill_handler(void) |
@@ -1813,3 +1804,13 @@ void __init build_tlb_refill_handler(void) | |||
1813 | } | 1804 | } |
1814 | } | 1805 | } |
1815 | } | 1806 | } |
1807 | |||
1808 | void __init flush_tlb_handlers(void) | ||
1809 | { | ||
1810 | flush_icache_range((unsigned long)handle_tlbl, | ||
1811 | (unsigned long)handle_tlbl + sizeof(handle_tlbl)); | ||
1812 | flush_icache_range((unsigned long)handle_tlbs, | ||
1813 | (unsigned long)handle_tlbs + sizeof(handle_tlbs)); | ||
1814 | flush_icache_range((unsigned long)handle_tlbm, | ||
1815 | (unsigned long)handle_tlbm + sizeof(handle_tlbm)); | ||
1816 | } | ||
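
[Note] Deferring the three flush_icache_range() calls into one flush_tlb_handlers() lets all of the fastpath handlers be assembled before any of them is made visible to the instruction stream, and sizeof(handle_tlbX) now covers each whole static buffer rather than FASTPATH_SIZE words. A sketch of the intended call order (the caller shown is hypothetical; in the tree this would be invoked from trap initialization after build_tlb_refill_handler()):

	void __init sketch_trap_init_tail(void)
	{
		build_tlb_refill_handler();	/* assemble refill + fastpaths */
		flush_tlb_handlers();		/* one icache pass for all three */
	}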