Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/cache.c        |   2
-rw-r--r--  arch/mips/mm/dma-default.c  |   1
-rw-r--r--  arch/mips/mm/hugetlbpage.c  |   1
-rw-r--r--  arch/mips/mm/init.c         |   1
-rw-r--r--  arch/mips/mm/ioremap.c      |   1
-rw-r--r--  arch/mips/mm/tlbex.c        | 150
-rw-r--r--  arch/mips/mm/uasm.c         |  23
7 files changed, 135 insertions(+), 44 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index be8627bc5b02..12af739048fa 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -133,7 +133,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 }
 
 unsigned long _page_cachable_default;
-EXPORT_SYMBOL_GPL(_page_cachable_default);
+EXPORT_SYMBOL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 9367e33fbd18..9547bc0cf188 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index cd0660c51f28..a7fee0dfb7a9 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -16,7 +16,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sysctl.h>
 #include <asm/mman.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 12539af38a99..2efcbd24c82f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -28,6 +28,7 @@
 #include <linux/proc_fs.h>
 #include <linux/pfn.h>
 #include <linux/hardirq.h>
+#include <linux/gfp.h>
 
 #include <asm/asm-offsets.h>
 #include <asm/bootinfo.h>
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 0c43248347bd..cacfd31e8ec9 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -10,6 +10,7 @@
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0de0e4127d66..86f004dc8355 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -31,6 +31,16 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
+
 static inline int r45k_bvahwbug(void)
 {
 	/* XXX: We should probe for the presence of this bug, but we don't. */
@@ -83,6 +93,7 @@ enum label_id {
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+	label_large_segbits_fault,
 #ifdef CONFIG_HUGETLB_PAGE
 	label_tlb_huge_update,
 #endif
@@ -101,6 +112,7 @@ UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
 #ifdef CONFIG_HUGETLB_PAGE
 UASM_L_LA(_tlb_huge_update)
 #endif
@@ -157,6 +169,10 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
+#ifdef CONFIG_64BIT
+static int check_for_high_segbits __cpuinitdata;
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 /*
  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
@@ -408,7 +424,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 	} else {
 #ifdef CONFIG_64BIT_PHYS_ADDR
-		uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 #else
 		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 #endif
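
This and several later hunks replace open-coded dsrl/dsrl32 selection with uasm_i_dsrl_safe(). The helper itself is introduced in asm/uasm.h, which is outside this diffstat; presumably it folds the MIPS64 shift-amount split into one call, along these lines (a sketch, not the header's verbatim contents):

    /*
     * Sketch of the assumed helper: MIPS64 encodes shift amounts of
     * 32..63 with the DSRL32 opcode and (amount - 32) in the shamt
     * field, so a "safe" wrapper can accept any amount in 0..63.
     * A matching uasm_i_dsll_safe() would wrap dsll/dsll32 the same way.
     */
    static inline void uasm_i_dsrl_safe(u32 **p, unsigned int rd,
                                        unsigned int rt, unsigned int shift)
    {
            if (shift < 32)
                    uasm_i_dsrl(p, rd, rt, shift);
            else
                    uasm_i_dsrl32(p, rd, rt, shift - 32);
    }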
@@ -532,7 +548,24 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-	uasm_il_bltz(p, r, tmp, label_vmalloc);
+
+	if (check_for_high_segbits) {
+		/*
+		 * The kernel currently implicitly assumes that the
+		 * MIPS SEGBITS parameter for the processor is
+		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+		 * allocate virtual addresses outside the maximum
+		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+		 * that doesn't prevent user code from accessing the
+		 * higher xuseg addresses.  Here, we make sure that
+		 * everything but the lower xuseg addresses goes down
+		 * the module_alloc/vmalloc path.
+		 */
+		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, ptr, label_vmalloc);
+	} else {
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
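
The shift count here is the number of virtual-address bits one top-level page table can resolve. A worked example with assumed values for one common configuration (4 KB pages, 64-bit, three levels; nothing in this diff fixes these numbers):

    #include <stdint.h>

    /*
     * PAGE_SHIFT = 12; each pmd/pgd level resolves PAGE_SHIFT - 3 = 9
     * bits (512 eight-byte entries per page); PGD_ORDER = 1 doubles the
     * pgd.  Then PGDIR_SHIFT = 12 + 9 + 9 = 30 and the handler's shift
     * count is PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3 = 30+1+12-3 = 40.
     */
    static int addr_beyond_pgd_range(uint64_t badvaddr)
    {
            /* Non-zero after the shift means some bit >= 40 was set, so
             * the page tables cannot cover this address: take the
             * label_vmalloc path rather than index off the pgd's end. */
            return (badvaddr >> 40) != 0;
    }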
@@ -549,14 +582,14 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * SMTC uses TCBind value as "CPU" index
 	 */
 	uasm_i_mfc0(p, ptr, C0_TCBIND);
-	uasm_i_dsrl(p, ptr, ptr, 19);
+	uasm_i_dsrl_safe(p, ptr, ptr, 19);
 # else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-	uasm_i_dsrl(p, ptr, ptr, 23);
+	uasm_i_dsrl_safe(p, ptr, ptr, 23);
 # endif
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_daddu(p, ptr, ptr, tmp);
@@ -569,44 +602,78 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_l_vmalloc_done(l, *p);
 
-	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
-		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-	else
-		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+	/* get pgd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 #ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
 
+enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-			unsigned int bvaddr, unsigned int ptr)
+			unsigned int bvaddr, unsigned int ptr,
+			enum vmalloc64_mode mode)
 {
 	long swpd = (long)swapper_pg_dir;
+	int single_insn_swpd;
+	int did_vmalloc_branch = 0;
+
+	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 
 	uasm_l_vmalloc(l, *p);
 
-	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, swpd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(swpd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+	if (mode == refill && check_for_high_segbits) {
+		if (single_insn_swpd) {
+			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+			did_vmalloc_branch = 1;
+			/* fall through */
+		} else {
+			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+		}
+	}
+	if (!did_vmalloc_branch) {
+		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+			uasm_il_b(p, r, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+		} else {
+			UASM_i_LA_mostly(p, ptr, swpd);
+			uasm_il_b(p, r, label_vmalloc_done);
+			if (uasm_in_compat_space_p(swpd))
+				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+			else
+				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+		}
+	}
+	if (mode == refill && check_for_high_segbits) {
+		uasm_l_large_segbits_fault(l, *p);
+		/*
+		 * We get here if we are an xsseg address, or if we are
+		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS)
+		 * boundary.
+		 *
+		 * Ignoring xsseg (assumed disabled, so it would generate
+		 * address errors anyway), the only remaining possibility
+		 * is the upper xuseg addresses.  On processors with
+		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+		 * addresses would have taken an address error.  We try
+		 * to mimic that here by taking a load/istream page
+		 * fault.
+		 */
+		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(p, ptr);
+		uasm_i_nop(p);
 	}
 }
 
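With mode == refill and check_for_high_segbits set, label_vmalloc can now be reached by two kinds of addresses, and the sign bit of BadVAddr is what separates them. A C restatement of the run-time decision the synthesized code makes (illustrative only; the names mirror the uasm labels above):

    #include <stdint.h>

    enum target { VMALLOC_DONE, LARGE_SEGBITS_FAULT };

    /*
     * Negative addresses are kernel-side (xkseg/vmalloc space): fall
     * through to label_vmalloc_done and use swapper_pg_dir.  Positive
     * addresses got here via the high-segbits check, i.e. xuseg beyond
     * what the page tables cover: fake an address error by jumping to
     * tlb_do_page_fault_0 (label_large_segbits_fault).
     */
    static enum target vmalloc_dispatch(int64_t badvaddr)
    {
            return badvaddr < 0 ? VMALLOC_DONE : LARGE_SEGBITS_FAULT;
    }
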
@@ -720,9 +787,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 	} else {
-		uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
 	}
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
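
The "convert to entrylo" shifts rely on the MIPS PTE layout keeping the hardware bits contiguous, with _PAGE_GLOBAL the least significant of them; the hunk only changes which shift primitive is used. A sketch of the transformation itself (the shift constant is hypothetical; the real one is ilog2(_PAGE_GLOBAL) from the kernel's pgtable headers):

    #include <stdint.h>

    #define PAGE_GLOBAL_SHIFT 6  /* hypothetical stand-in for ilog2(_PAGE_GLOBAL) */

    /* A logical right shift drops the software-only PTE bits below
     * _PAGE_GLOBAL, leaving G/V/D/C and the PFN aligned the way the
     * EntryLo0/EntryLo1 registers expect. */
    static uint64_t pte_to_entrylo(uint64_t pte)
    {
            return pte >> PAGE_GLOBAL_SHIFT;
    }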
@@ -788,10 +855,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	 * create the plain linear handler
 	 */
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
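
The reworked BCM1250 M3 workaround strengthens the old comparison: instead of a single shift that discarded everything below PAGE_SHIFT + 1, it now also compares the region bits 63:62 and confines the VPN2 comparison to the 44 implemented virtual-address bits (segbits = 44 comes from the patch itself). Roughly what the four emitted shift/or instructions compute, restated in C for illustration; the same sequence appears again in the load handler below:

    #include <stdint.h>

    /* Non-zero means BadVAddr and EntryHi disagree on the faulting page,
     * so the generated handler branches to label_leave without writing
     * the TLB. */
    static int m3_war_mismatch(uint64_t badvaddr, uint64_t entryhi)
    {
            const unsigned int segbits = 44;        /* as in the patch */
            uint64_t x = badvaddr ^ entryhi;
            uint64_t region = x >> 62;              /* bits 63:62 */
            /* keep bits segbits-1 .. 13 of the xor, discard the rest */
            uint64_t vpn2 = (x >> (12 + 1)) << (64 + 12 + 1 - segbits);
            return (region | vpn2) != 0;
    }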
@@ -820,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
 #endif
 
 	/*
@@ -930,15 +1002,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 }
 
 /*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.
  */
@@ -1297,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }
 
@@ -1312,10 +1375,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
@@ -1516,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
 	 */
 	static int run_once = 0;
 
+#ifdef CONFIG_64BIT
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
 	switch (current_cpu_type()) {
 	case CPU_R2000:
 	case CPU_R3000:
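
The flag is computed once, before any handler is built: it asks whether the CPU implements more virtual-address bits (cpu_data's vmbits, probed elsewhere) than the page tables can cover. Continuing the assumed 4 KB-page numbers from the earlier example:

    #include <stdio.h>

    int main(void)
    {
            const int vmbits = 44;                  /* e.g. an SB-1 style core */
            const int covered = 30 + 1 + 12 - 3;    /* PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3 */

            /* Prints 1: the extra range check gets compiled in.  A core
             * reporting vmbits == 40 would keep the old single-branch
             * fastpath. */
            printf("check_for_high_segbits = %d\n", vmbits > covered);
            return 0;
    }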
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 1581e9852461..611d564fdcf1 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -31,7 +31,8 @@ enum fields {
 	BIMM = 0x040,
 	JIMM = 0x080,
 	FUNC = 0x100,
-	SET = 0x200
+	SET = 0x200,
+	SCIMM = 0x400
 };
 
 #define OP_MASK		0x3f
@@ -52,6 +53,8 @@ enum fields
 #define FUNC_SH		0
 #define SET_MASK	0x7
 #define SET_SH		0
+#define SCIMM_MASK	0xfffff
+#define SCIMM_SH	6
 
 enum opcode {
 	insn_invalid,
@@ -61,10 +64,10 @@ enum opcode {
 	insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
 	insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
 	insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
-	insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+	insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
 	insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
 	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
-	insn_dins
+	insn_dins, insn_syscall
 };
 
 struct insn {
@@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
 	{ insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
+	{ insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
 	{ insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
 	{ insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
@@ -136,6 +140,7 @@ static struct insn insn_table[] __cpuinitdata = {
 	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
 	{ insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
 	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
 	{ insn_invalid, 0, 0 }
 };
 
@@ -208,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg)
 	return (arg >> 2) & JIMM_MASK;
 }
 
+static inline __cpuinit u32 build_scimm(u32 arg)
+{
+	if (arg & ~SCIMM_MASK)
+		printk(KERN_WARNING "Micro-assembler field overflow\n");
+
+	return (arg & SCIMM_MASK) << SCIMM_SH;
+}
+
 static inline __cpuinit u32 build_func(u32 arg)
 {
 	if (arg & ~FUNC_MASK)
@@ -266,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
 		op |= build_func(va_arg(ap, u32));
 	if (ip->fields & SET)
 		op |= build_set(va_arg(ap, u32));
+	if (ip->fields & SCIMM)
+		op |= build_scimm(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
@@ -373,6 +388,7 @@ I_u2s3u1(_lw)
 I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
+I_u3u1u2(_or)
 I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
@@ -391,6 +407,7 @@ I_0(_tlbwr)
 I_u3u1u2(_xor)
 I_u2u1u3(_xori)
 I_u2u1msbu3(_dins);
+I_u1(_syscall);
 
 /* Handle labels. */
 void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
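
Taken together, the uasm.c changes let callers emit a syscall instruction carrying a code field: insn_syscall routes its single operand through build_scimm(), which places it at bits 25..6. A small standalone encoding check (in-kernel code would simply call uasm_i_syscall(&p, code), which the I_u1 macro above generates):

    #include <stdint.h>
    #include <assert.h>

    /* Mirror of build_insn() for insn_syscall: opcode SPECIAL (0),
     * function field syscall_op (0x0c), 20-bit code via build_scimm(). */
    static uint32_t encode_syscall(uint32_t code)
    {
            return ((code & 0xfffff) << 6) | 0x0c;  /* SCIMM_MASK, SCIMM_SH, syscall_op */
    }

    int main(void)
    {
            assert(encode_syscall(0) == 0x0000000c);        /* plain "syscall" */
            assert(encode_syscall(1) == 0x0000004c);        /* syscall with code 1 */
            return 0;
    }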