Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--	arch/mips/mm/tlbex.c	590
1 file changed, 503 insertions(+), 87 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 93816f3bca67..083d3412d0bc 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -26,8 +26,10 @@
 #include <linux/smp.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/cache.h>
 
-#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
 #include <asm/war.h>
 #include <asm/uasm.h>
 
@@ -63,6 +65,52 @@ static inline int __maybe_unused r10000_llsc_war(void)
 	return R10000_LLSC_WAR;
 }
 
+static int use_bbit_insns(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
+	case CPU_CAVIUM_OCTEON2:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int use_lwx_insns(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_CAVIUM_OCTEON2:
+		return 1;
+	default:
+		return 0;
+	}
+}
+#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
+    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+static bool scratchpad_available(void)
+{
+	return true;
+}
+static int scratchpad_offset(int i)
+{
+	/*
+	 * CVMSEG starts at address -32768 and extends for
+	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
+	 */
+	i += 1; /* Kernel use starts at the top and works down. */
+	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
+}
+#else
+static bool scratchpad_available(void)
+{
+	return false;
+}
+static int scratchpad_offset(int i)
+{
+	BUG();
+}
+#endif
 /*
  * Found by experiment: At least some revisions of the 4kc throw under
  * some circumstances a machine check exception, triggered by invalid
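For reference, scratchpad_offset() above hands out 8-byte slots from the top of the Octeon CVMSEG region downwards; below is a standalone sketch of that arithmetic (not part of the patch), using an example CVMSEG size of 2 cache lines where the kernel would use CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE.

/* Standalone sketch of scratchpad_offset() with an example CVMSEG size. */
#include <assert.h>
#include <stdio.h>

#define CVMSEG_SIZE	2		/* example value, in 128-byte cache lines */
#define CVMSEG_BASE	(-32768)	/* CVMSEG sits just below address 0, reachable
					   with a negative 16-bit offset from $0 */

static int scratchpad_offset(int i)
{
	i += 1;				/* kernel slots are allocated from the top down */
	return CVMSEG_SIZE * 128 - (8 * i) - 32768;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		int off = scratchpad_offset(i);

		printf("slot %d -> offset %d\n", i, off);
		/* every slot is an 8-byte doubleword inside the CVMSEG window */
		assert(off >= CVMSEG_BASE && off < CVMSEG_BASE + CVMSEG_SIZE * 128);
	}
	return 0;
}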
@@ -173,11 +221,41 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
+static int check_for_high_segbits __cpuinitdata;
+
+static unsigned int kscratch_used_mask __cpuinitdata;
+
+static int __cpuinit allocate_kscratch(void)
+{
+	int r;
+	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
+
+	r = ffs(a);
+
+	if (r == 0)
+		return -1;
+
+	r--; /* make it zero based */
+
+	kscratch_used_mask |= (1 << r);
+
+	return r;
+}
+
+static int scratch_reg __cpuinitdata;
+static int pgd_reg __cpuinitdata;
+enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
 /*
  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
  * we cannot do r3000 under these circumstances.
+ *
+ * Declare pgd_current here instead of including mmu_context.h to avoid type
+ * conflicts for tlbmiss_handler_setup_pgd
  */
+extern unsigned long pgd_current[];
 
 /*
  * The R3000 TLB handler is simple.
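allocate_kscratch() above is a plain first-free-bit pool over the CPU's kscratch_mask; here is a standalone sketch of the same bookkeeping (not part of the patch), with an illustrative mask value.

/* Standalone sketch of the allocate_kscratch() bitmask allocator. */
#include <stdio.h>
#include <strings.h>		/* ffs() */

static unsigned int kscratch_mask = 0x3c;	/* example: KScratch regs 2..5 usable */
static unsigned int kscratch_used_mask;		/* registers already handed out */

static int allocate_kscratch(void)
{
	unsigned int a = kscratch_mask & ~kscratch_used_mask;
	int r = ffs(a);			/* lowest free bit, 1-based; 0 if none */

	if (r == 0)
		return -1;
	r--;				/* make it zero based */
	kscratch_used_mask |= 1u << r;
	return r;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("allocation %d -> register %d\n", i, allocate_kscratch());
	/* prints 2, 3, 4, 5 and then -1 once the pool is exhausted */
	return 0;
}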
@@ -440,21 +518,43 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 static __cpuinit void build_restore_pagemask(u32 **p,
 					     struct uasm_reloc **r,
 					     unsigned int tmp,
-					     enum label_id lid)
+					     enum label_id lid,
+					     int restore_scratch)
 {
-	/* Reset default page size */
-	if (PM_DEFAULT_MASK >> 16) {
-		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
-		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, lid);
-		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
-	} else if (PM_DEFAULT_MASK) {
-		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, lid);
-		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (restore_scratch) {
+		/* Reset default page size */
+		if (PM_DEFAULT_MASK >> 16) {
+			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+			uasm_il_b(p, r, lid);
+		} else if (PM_DEFAULT_MASK) {
+			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+			uasm_il_b(p, r, lid);
+		} else {
+			uasm_i_mtc0(p, 0, C0_PAGEMASK);
+			uasm_il_b(p, r, lid);
+		}
+		if (scratch_reg > 0)
+			UASM_i_MFC0(p, 1, 31, scratch_reg);
+		else
+			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 	} else {
-		uasm_il_b(p, r, lid);
-		uasm_i_mtc0(p, 0, C0_PAGEMASK);
+		/* Reset default page size */
+		if (PM_DEFAULT_MASK >> 16) {
+			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+			uasm_il_b(p, r, lid);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+		} else if (PM_DEFAULT_MASK) {
+			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+			uasm_il_b(p, r, lid);
+			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+		} else {
+			uasm_il_b(p, r, lid);
+			uasm_i_mtc0(p, 0, C0_PAGEMASK);
+		}
 	}
 }
 
@@ -462,7 +562,8 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 					    struct uasm_label **l,
 					    struct uasm_reloc **r,
 					    unsigned int tmp,
-					    enum tlb_write_entry wmode)
+					    enum tlb_write_entry wmode,
+					    int restore_scratch)
 {
 	/* Set huge page tlb entry size */
 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -471,7 +572,7 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 
 	build_tlb_write_entry(p, l, r, wmode);
 
-	build_restore_pagemask(p, r, tmp, label_leave);
+	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 }
 
 /*
@@ -482,8 +583,12 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 		  unsigned int pmd, int lid)
 {
 	UASM_i_LW(p, tmp, 0, pmd);
-	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
-	uasm_il_bnez(p, r, tmp, lid);
+	if (use_bbit_insns()) {
+		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
+	} else {
+		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+		uasm_il_bnez(p, r, tmp, lid);
+	}
 }
 
 static __cpuinit void build_huge_update_entries(u32 **p,
@@ -532,7 +637,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
 	UASM_i_SW(p, pte, 0, ptr);
 #endif
 	build_huge_update_entries(p, pte, ptr);
-	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
+	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -573,13 +678,22 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-	/*
-	 * &pgd << 11 stored in CONTEXT [23..63].
-	 */
-	UASM_i_MFC0(p, ptr, C0_CONTEXT);
-	uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
-	uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */
-	uasm_i_drotr(p, ptr, ptr, 11);
+	if (pgd_reg != -1) {
+		/* pgd is in pgd_reg */
+		UASM_i_MFC0(p, ptr, 31, pgd_reg);
+	} else {
+		/*
+		 * &pgd << 11 stored in CONTEXT [23..63].
+		 */
+		UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+		/* Clear lower 23 bits of context. */
+		uasm_i_dins(p, ptr, 0, 0, 23);
+
+		/* 1 0 1 0 1 << 6 xkphys cached */
+		uasm_i_ori(p, ptr, ptr, 0x540);
+		uasm_i_drotr(p, ptr, ptr, 11);
+	}
 #elif defined(CONFIG_SMP)
 # ifdef CONFIG_MIPS_MT_SMTC
 	/*
@@ -620,7 +734,6 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 #endif
 }
 
-enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
@@ -638,7 +751,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_l_vmalloc(l, *p);
 
-	if (mode == refill && check_for_high_segbits) {
+	if (mode != not_refill && check_for_high_segbits) {
 		if (single_insn_swpd) {
 			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
@@ -661,7 +774,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 		}
 	}
-	if (mode == refill && check_for_high_segbits) {
+	if (mode != not_refill && check_for_high_segbits) {
 		uasm_l_large_segbits_fault(l, *p);
 		/*
 		 * We get here if we are an xsseg address, or if we are
@@ -677,7 +790,15 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 */
 		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 		uasm_i_jr(p, ptr);
-		uasm_i_nop(p);
+
+		if (mode == refill_scratch) {
+			if (scratch_reg > 0)
+				UASM_i_MFC0(p, 1, 31, scratch_reg);
+			else
+				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+		} else {
+			uasm_i_nop(p);
+		}
 	}
 }
 
@@ -834,6 +955,185 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 #endif
 }
 
+struct mips_huge_tlb_info {
+	int huge_pte;
+	int restore_scratch;
+};
+
+static struct mips_huge_tlb_info __cpuinit
+build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+			       struct uasm_reloc **r, unsigned int tmp,
+			       unsigned int ptr, int c0_scratch)
+{
+	struct mips_huge_tlb_info rv;
+	unsigned int even, odd;
+	int vmalloc_branch_delay_filled = 0;
+	const int scratch = 1; /* Our extra working register */
+
+	rv.huge_pte = scratch;
+	rv.restore_scratch = 0;
+
+	if (check_for_high_segbits) {
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+		if (pgd_reg != -1)
+			UASM_i_MFC0(p, ptr, 31, pgd_reg);
+		else
+			UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+		if (c0_scratch >= 0)
+			UASM_i_MTC0(p, scratch, 31, c0_scratch);
+		else
+			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+		uasm_i_dsrl_safe(p, scratch, tmp,
+				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, scratch, label_vmalloc);
+
+		if (pgd_reg == -1) {
+			vmalloc_branch_delay_filled = 1;
+			/* Clear lower 23 bits of context. */
+			uasm_i_dins(p, ptr, 0, 0, 23);
+		}
+	} else {
+		if (pgd_reg != -1)
+			UASM_i_MFC0(p, ptr, 31, pgd_reg);
+		else
+			UASM_i_MFC0(p, ptr, C0_CONTEXT);
+
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+
+		if (c0_scratch >= 0)
+			UASM_i_MTC0(p, scratch, 31, c0_scratch);
+		else
+			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
+
+		if (pgd_reg == -1)
+			/* Clear lower 23 bits of context. */
+			uasm_i_dins(p, ptr, 0, 0, 23);
+
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
+
+	if (pgd_reg == -1) {
+		vmalloc_branch_delay_filled = 1;
+		/* 1 0 1 0 1 << 6 xkphys cached */
+		uasm_i_ori(p, ptr, ptr, 0x540);
+		uasm_i_drotr(p, ptr, ptr, 11);
+	}
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define LOC_PTEP scratch
+#else
+#define LOC_PTEP ptr
+#endif
+
+	if (!vmalloc_branch_delay_filled)
+		/* get pgd offset in bytes */
+		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+	uasm_l_vmalloc_done(l, *p);
+
+	/*
+	 *                         tmp       ptr
+	 * fall-through case =   badvaddr  *pgd_current
+	 * vmalloc case      =   badvaddr  swapper_pg_dir
+	 */
+
+	if (vmalloc_branch_delay_filled)
+		/* get pgd offset in bytes */
+		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
+
+#ifdef __PAGETABLE_PMD_FOLDED
+	GET_CONTEXT(p, tmp); /* get context reg */
+#endif
+	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
+
+	if (use_lwx_insns()) {
+		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
+	} else {
+		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
+		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
+	}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+	/* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
+	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
+	GET_CONTEXT(p, tmp); /* get context reg */
+
+	if (use_lwx_insns()) {
+		UASM_i_LWX(p, scratch, scratch, ptr);
+	} else {
+		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
+		UASM_i_LW(p, scratch, 0, ptr);
+	}
+#endif
+	/* Adjust the context during the load latency. */
+	build_adjust_context(p, tmp);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
+	/*
+	 * The in the LWX case we don't want to do the load in the
+	 * delay slot.  It cannot issue in the same cycle and may be
+	 * speculative and unneeded.
+	 */
+	if (use_lwx_insns())
+		uasm_i_nop(p);
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
+	/* build_update_entries */
+	if (use_lwx_insns()) {
+		even = ptr;
+		odd = tmp;
+		UASM_i_LWX(p, even, scratch, tmp);
+		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
+		UASM_i_LWX(p, odd, scratch, tmp);
+	} else {
+		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
+		even = tmp;
+		odd = ptr;
+		UASM_i_LW(p, even, 0, ptr); /* get even pte */
+		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
+	}
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
+		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
+		uasm_i_drotr(p, even, even,
+			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+		uasm_i_drotr(p, odd, odd,
+			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
+		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
+		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
+	}
+	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
+
+	if (c0_scratch >= 0) {
+		UASM_i_MFC0(p, scratch, 31, c0_scratch);
+		build_tlb_write_entry(p, l, r, tlb_random);
+		uasm_l_leave(l, *p);
+		rv.restore_scratch = 1;
+	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
+		build_tlb_write_entry(p, l, r, tlb_random);
+		uasm_l_leave(l, *p);
+		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+	} else {
+		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
+		build_tlb_write_entry(p, l, r, tlb_random);
+		uasm_l_leave(l, *p);
+		rv.restore_scratch = 1;
+	}
+
+	uasm_i_eret(p); /* return from trap */
+
+	return rv;
+}
+
 /*
  * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
  * because EXL == 0. If we wrap, we can also use the 32 instruction
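One detail of the fast-path handler above worth spelling out: when indexing the page directory it folds the multiply-by-8 into the shift, i.e. shifting BadVAddr right by (PGDIR_SHIFT - 3) and masking with (PTRS_PER_PGD - 1) << 3 yields the byte offset of the pgd slot directly. A standalone sketch of that identity, with example shift/size values (64-bit, 4K pages; not taken from the patch):

/* Standalone sketch of the pgd byte-offset arithmetic emitted above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT	30		/* example: 4K pages, 512-entry tables */
#define PTRS_PER_PGD	512

int main(void)
{
	uint64_t badvaddr = 0x000000badc0de000ULL;	/* arbitrary test address */

	/* the obvious form: compute the index, then scale to a byte offset */
	uint64_t index = (badvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	uint64_t bytes = index * sizeof(uint64_t);

	/* the form the handler emits: one shift and one mask, no multiply */
	uint64_t fused = (badvaddr >> (PGDIR_SHIFT - 3)) &
			 ((PTRS_PER_PGD - 1) << 3);

	printf("index %llu, byte offset %llu\n",
	       (unsigned long long)index, (unsigned long long)bytes);
	assert(bytes == fused);
	return 0;
}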
@@ -849,54 +1149,67 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	struct uasm_reloc *r = relocs;
 	u32 *f;
 	unsigned int final_len;
+	struct mips_huge_tlb_info htlb_info;
+	enum vmalloc64_mode vmalloc_mode;
 
 	memset(tlb_handler, 0, sizeof(tlb_handler));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 	memset(final_handler, 0, sizeof(final_handler));
 
-	/*
-	 * create the plain linear handler
-	 */
-	if (bcm1250_m3_war()) {
-		unsigned int segbits = 44;
+	if (scratch_reg == 0)
+		scratch_reg = allocate_kscratch();
 
-		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
-		uasm_i_xor(&p, K0, K0, K1);
-		uasm_i_dsrl_safe(&p, K1, K0, 62);
-		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
-		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
-		uasm_i_or(&p, K0, K0, K1);
-		uasm_il_bnez(&p, &r, K0, label_leave);
-		/* No need for uasm_i_nop */
-	}
+	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
+		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+							  scratch_reg);
+		vmalloc_mode = refill_scratch;
+	} else {
+		htlb_info.huge_pte = K0;
+		htlb_info.restore_scratch = 0;
+		vmalloc_mode = refill_noscratch;
+		/*
+		 * create the plain linear handler
+		 */
+		if (bcm1250_m3_war()) {
+			unsigned int segbits = 44;
+
+			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+			uasm_i_xor(&p, K0, K0, K1);
+			uasm_i_dsrl_safe(&p, K1, K0, 62);
+			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+			uasm_i_or(&p, K0, K0, K1);
+			uasm_il_bnez(&p, &r, K0, label_leave);
+			/* No need for uasm_i_nop */
+		}
 
 #ifdef CONFIG_64BIT
 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
 	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
 	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
 #endif
 
 	build_get_ptep(&p, K0, K1);
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
 	uasm_l_leave(&l, p);
 	uasm_i_eret(&p); /* return from trap */
-
+	}
 #ifdef CONFIG_HUGETLB_PAGE
 	uasm_l_tlb_huge_update(&l, p);
-	UASM_i_LW(&p, K0, 0, K1);
-	build_huge_update_entries(&p, K0, K1);
-	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
+	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+				   htlb_info.restore_scratch);
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
 #endif
 
 	/*
@@ -1014,6 +1327,55 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+
+static void __cpuinit build_r4000_setup_pgd(void)
+{
+	const int a0 = 4;
+	const int a1 = 5;
+	u32 *p = tlbmiss_handler_setup_pgd;
+	struct uasm_label *l = labels;
+	struct uasm_reloc *r = relocs;
+
+	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+	memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+
+	pgd_reg = allocate_kscratch();
+
+	if (pgd_reg == -1) {
+		/* PGD << 11 in c0_Context */
+		/*
+		 * If it is a ckseg0 address, convert to a physical
+		 * address.  Shifting right by 29 and adding 4 will
+		 * result in zero for these addresses.
+		 *
+		 */
+		UASM_i_SRA(&p, a1, a0, 29);
+		UASM_i_ADDIU(&p, a1, a1, 4);
+		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
+		uasm_l_tlbl_goaround1(&l, p);
+		UASM_i_SLL(&p, a0, a0, 11);
+		uasm_i_jr(&p, 31);
+		UASM_i_MTC0(&p, a0, C0_CONTEXT);
+	} else {
+		/* PGD in c0_KScratch */
+		uasm_i_jr(&p, 31);
+		UASM_i_MTC0(&p, a0, 31, pgd_reg);
+	}
+	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
+		panic("tlbmiss_handler_setup_pgd space exceeded");
+	uasm_resolve_relocs(relocs, labels);
+	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
+		 (unsigned int)(p - tlbmiss_handler_setup_pgd));
+
+	dump_handler(tlbmiss_handler_setup_pgd,
+		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+}
+#endif
 
 static void __cpuinit
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
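The pgd_reg == -1 fallback in build_r4000_setup_pgd() above converts a CKSEG0 pgd pointer to a physical address: an arithmetic shift right by 29 plus 4 is zero exactly for CKSEG0 addresses, and clearing bits 29..63 then leaves the physical address. A standalone sketch of that test with a made-up pointer (it assumes the usual two's-complement, arithmetic-shift behaviour):

/* Standalone sketch of the ckseg0 check used in build_r4000_setup_pgd(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t pgd_va = (int64_t)0xFFFFFFFF81234000ULL;	/* hypothetical ckseg0 pointer */

	int64_t check = (pgd_va >> 29) + 4;	/* mirrors UASM_i_SRA + UASM_i_ADDIU */
	assert(check == 0);			/* zero only for ckseg0 addresses */

	/* mirrors the dinsm: clear bits 29..63, leaving the physical address */
	uint64_t phys = (uint64_t)pgd_va & ((1ULL << 29) - 1);
	printf("ckseg0 %#llx -> physical %#llx\n",
	       (unsigned long long)pgd_va, (unsigned long long)phys);

	/* a ckseg1 (uncached) pointer fails the test and is left untouched */
	assert((((int64_t)0xFFFFFFFFA0000000ULL >> 29) + 4) != 0);
	return 0;
}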
@@ -1100,14 +1462,20 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	if (kernel_uses_smartmips_rixi) {
-		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
-		uasm_il_beqz(p, r, pte, lid);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
+			uasm_i_nop(p);
+		} else {
+			uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+			uasm_il_beqz(p, r, pte, lid);
+			iPTE_LW(p, pte, ptr);
+		}
 	} else {
 		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 		uasm_il_bnez(p, r, pte, lid);
+		iPTE_LW(p, pte, ptr);
 	}
-	iPTE_LW(p, pte, ptr);
 }
 
 /* Make PTE valid, store result in PTR. */
@@ -1128,10 +1496,17 @@ static void __cpuinit
 build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-	uasm_il_bnez(p, r, pte, lid);
-	iPTE_LW(p, pte, ptr);
+	if (use_bbit_insns()) {
+		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
+		uasm_i_nop(p);
+		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
+		uasm_i_nop(p);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
+		uasm_il_bnez(p, r, pte, lid);
+		iPTE_LW(p, pte, ptr);
+	}
 }
 
 /* Make PTE writable, update software status bits as well, then store
@@ -1155,12 +1530,19 @@ static void __cpuinit
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
-	uasm_il_beqz(p, r, pte, lid);
-	iPTE_LW(p, pte, ptr);
+	if (use_bbit_insns()) {
+		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
+		uasm_i_nop(p);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_WRITE);
+		uasm_il_beqz(p, r, pte, lid);
+		iPTE_LW(p, pte, ptr);
+	}
 }
 
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+
+
 /*
  * R3000 style TLB load/store/modify handlers.
  */
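The three helpers above swap the generic andi + beqz/bnez test for a single Octeon bbit0/bbit1 branch where available; unlike andi, the bit-branch leaves the PTE register intact, so no iPTE_LW reload is needed afterwards. A standalone sketch of the equivalence, with an example _PAGE_WRITE bit position:

/* Standalone sketch: bbit0 on a PTE bit behaves like andi + beqz. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_WRITE	(1 << 4)	/* example bit position */

static int bbit0_taken(unsigned long pte, int bit)
{
	return ((pte >> bit) & 1) == 0;		/* bbit0: branch if the bit is clear */
}

static int andi_beqz_taken(unsigned long pte)
{
	return (pte & _PAGE_WRITE) == 0;	/* generic sequence: mask, then test */
}

int main(void)
{
	unsigned long ptes[] = { 0, _PAGE_WRITE, _PAGE_WRITE | 1, 0xff & ~_PAGE_WRITE };

	for (unsigned int i = 0; i < sizeof(ptes) / sizeof(ptes[0]); i++)
		assert(bbit0_taken(ptes[i], 4) == andi_beqz_taken(ptes[i]));
	/* unlike andi, bbit0 does not clobber the register holding the PTE */
	printf("bbit0 and andi+beqz agree\n");
	return 0;
}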
@@ -1402,14 +1784,23 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it. Skip the expensive test..
 		 */
-		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+				      label_tlbl_goaround1);
+		} else {
+			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine entrylo 0 or 1 based on ptr. */
-		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-		uasm_i_beqz(&p, K0, 8);
+		if (use_bbit_insns()) {
+			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+		} else {
+			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+			uasm_i_beqz(&p, K0, 8);
+		}
 
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
@@ -1417,12 +1808,18 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * If the entryLo (now in K0) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
-		uasm_i_andi(&p, K0, K0, 2);
-		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
-
-		uasm_l_tlbl_goaround1(&l, p);
-		/* Reload the PTE value */
-		iPTE_LW(&p, K0, K1);
+		if (use_bbit_insns()) {
+			uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
+			/* Reload the PTE value */
+			iPTE_LW(&p, K0, K1);
+			uasm_l_tlbl_goaround1(&l, p);
+		} else {
+			uasm_i_andi(&p, K0, K0, 2);
+			uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+			uasm_l_tlbl_goaround1(&l, p);
+			/* Reload the PTE value */
+			iPTE_LW(&p, K0, K1);
+		}
 	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
@@ -1442,23 +1839,35 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * If the page is not _PAGE_VALID, RI or XI could not
 		 * have triggered it. Skip the expensive test..
 		 */
-		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+				      label_tlbl_goaround2);
+		} else {
+			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine entrylo 0 or 1 based on ptr. */
-		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-		uasm_i_beqz(&p, K0, 8);
-
+		if (use_bbit_insns()) {
+			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+		} else {
+			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+			uasm_i_beqz(&p, K0, 8);
+		}
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
 		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
 		/*
 		 * If the entryLo (now in K0) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
-		uasm_i_andi(&p, K0, K0, 2);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		if (use_bbit_insns()) {
+			uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
+		} else {
+			uasm_i_andi(&p, K0, K0, 2);
+			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		}
 		/* Reload the PTE value */
 		iPTE_LW(&p, K0, K1);
 
@@ -1466,7 +1875,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * We clobbered C0_PAGEMASK, restore it. On the other branch
 		 * it is restored in build_huge_tlb_write_entry.
 		 */
-		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);
 
 		uasm_l_tlbl_goaround2(&l, p);
 	}
@@ -1623,13 +2032,16 @@ void __cpuinit build_tlb_refill_handler(void)
 		break;
 
 	default:
-		build_r4000_tlb_refill_handler();
 		if (!run_once) {
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+			build_r4000_setup_pgd();
+#endif
 			build_r4000_tlb_load_handler();
 			build_r4000_tlb_store_handler();
 			build_r4000_tlb_modify_handler();
 			run_once++;
 		}
+		build_r4000_tlb_refill_handler();
 	}
 }
 
@@ -1641,4 +2053,8 @@ void __cpuinit flush_tlb_handlers(void)
 			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
 	local_flush_icache_range((unsigned long)handle_tlbm,
 			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
+			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+#endif
 }