Diffstat (limited to 'arch/mips/mm/tlbex.c')
 arch/mips/mm/tlbex.c | 282 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 244 insertions(+), 38 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0615b62efd6d..8f606ead826e 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -6,8 +6,9 @@
  * Synthesize TLB refill handlers at runtime.
  *
  * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
  * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
@@ -19,6 +20,7 @@
  * (Condolences to Napoleon XIV)
  */

+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -82,6 +84,9 @@ enum label_id {
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+#ifdef CONFIG_HUGETLB_PAGE
+	label_tlb_huge_update,
+#endif
 };

 UASM_L_LA(_second_part)
@@ -98,6 +103,9 @@ UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+#ifdef CONFIG_HUGETLB_PAGE
+UASM_L_LA(_tlb_huge_update)
+#endif

 /*
  * For debug purposes.
@@ -125,6 +133,7 @@ static inline void dump_handler(const u32 *handler, int count)
 #define C0_TCBIND	2, 2
 #define C0_ENTRYLO1	3, 0
 #define C0_CONTEXT	4, 0
+#define C0_PAGEMASK	5, 0
 #define C0_BADVADDR	8, 0
 #define C0_ENTRYHI	10, 0
 #define C0_EPC		14, 0
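The C0_* defines name a coprocessor-0 register as a (register, select) pair, so one macro argument expands into the two operand parameters the uasm emitters expect; PageMask is CP0 register 5, select 0. A minimal stand-alone sketch of the trick (the mtc0() stub below is illustrative, not the kernel's emitter):

	#include <stdio.h>

	#define C0_PAGEMASK	5, 0	/* CP0 register 5, select 0 */

	static void mtc0(int gpr, int c0reg, int sel)
	{
		printf("mtc0 $%d, $%d, %d\n", gpr, c0reg, sel);
	}

	int main(void)
	{
		mtc0(1, C0_PAGEMASK);	/* expands to mtc0(1, 5, 0) */
		return 0;
	}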
@@ -258,7 +267,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}

 	if (cpu_has_mips_r2) {
-		uasm_i_ehb(p);
+		if (cpu_has_mips_r2_exec_hazard)
+			uasm_i_ehb(p);
 		tlbw(p);
 		return;
 	}
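On MIPS R2 cores the mtc0 writes that set up EntryHi/EntryLo create an execution hazard against the following TLB write; ehb is the architectural barrier that clears it, and the cpu_has_mips_r2_exec_hazard test now skips the barrier on cores that do not need one. A stand-alone sketch of the emission pattern, assuming only that the handler is synthesized one 32-bit opcode at a time into a buffer (the two encodings are the real MIPS32r2 ones):

	#include <stdint.h>

	static void emit(uint32_t **p, uint32_t insn)
	{
		*(*p)++ = insn;			/* append opcode, advance cursor */
	}

	static void emit_tlbwr_r2(uint32_t **p, int has_exec_hazard)
	{
		if (has_exec_hazard)
			emit(p, 0x000000c0);	/* ehb: clear mtc0 -> tlbwr hazard */
		emit(p, 0x42000006);		/* tlbwr */
	}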
@@ -310,7 +320,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_BCM3302:
 	case CPU_BCM4710:
 	case CPU_LOONGSON2:
-	case CPU_CAVIUM_OCTEON:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
@@ -382,6 +391,98 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }

+#ifdef CONFIG_HUGETLB_PAGE
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	/* Reset default page size */
+	if (PM_DEFAULT_MASK >> 16) {
+		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+		uasm_il_b(p, r, label_leave);
+		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	} else if (PM_DEFAULT_MASK) {
+		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+		uasm_il_b(p, r, label_leave);
+		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	} else {
+		uasm_il_b(p, r, label_leave);
+		uasm_i_mtc0(p, 0, C0_PAGEMASK);
+	}
+}
+
+/*
+ * Check if Huge PTE is present, if so then jump to LABEL.
+ */
+static void __cpuinit
+build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
+		  unsigned int pmd, int lid)
+{
+	UASM_i_LW(p, tmp, 0, pmd);
+	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+	uasm_il_bnez(p, r, tmp, lid);
+}
+
+static __cpuinit void build_huge_update_entries(u32 **p,
+						unsigned int pte,
+						unsigned int tmp)
+{
+	int small_sequence;
+
+	/*
+	 * A huge PTE describes an area the size of the
+	 * configured huge page size. This is twice the
+	 * size of the large TLB entry we intend to use.
+	 * A TLB entry half the size of the configured
+	 * huge page size is configured into entrylo0
+	 * and entrylo1 to cover the contiguous huge PTE
+	 * address space.
+	 */
+	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
+
+	/* We can clobber tmp.  It isn't used after this. */
+	if (!small_sequence)
+		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
+
+	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	/* convert to entrylo1 */
+	if (small_sequence)
+		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
+	else
+		UASM_i_ADDU(p, pte, pte, tmp);
+
+	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+}
+
+static __cpuinit void build_huge_handler_tail(u32 **p,
+					      struct uasm_reloc **r,
+					      struct uasm_label **l,
+					      unsigned int pte,
+					      unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+	UASM_i_SC(p, pte, 0, ptr);
+	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
+	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
+#else
+	UASM_i_SW(p, pte, 0, ptr);
+#endif
+	build_huge_update_entries(p, pte, ptr);
+	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_64BIT
 /*
  * TMP and PTR are scratch.
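build_huge_update_entries() turns one huge PTE into the EntryLo0/EntryLo1 pair: after the PTE is shifted right by 6 into EntryLo format, the second half of the huge page lies (HPAGE_SIZE / 2) >> 12 page frames higher, which in the EntryLo encoding (PFN at bit 6) is exactly HPAGE_SIZE >> 7. A worked check, assuming 2 MB huge pages (a common MIPS configuration; HPAGE_SIZE here is a local stand-in, not the kernel's define):

	#include <assert.h>
	#include <stdio.h>

	#define HPAGE_SIZE	(2UL << 20)	/* assumption: 2 MB huge pages */

	int main(void)
	{
		unsigned long step = HPAGE_SIZE >> 7;

		/* half a huge page, expressed as a PFN delta at bit 6 */
		assert(step == ((HPAGE_SIZE / 2) >> 12) << 6);
		/* 0x4000 < 0x10000, so a single ADDIU immediate suffices
		 * and the "small_sequence" path is taken; no LUI needed. */
		printf("entrylo step = %#lx, small_sequence = %d\n",
		       step, step < 0x10000);
		return 0;
	}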
@@ -649,6 +750,14 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 #endif
 }

+/*
+ * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
+ * because EXL == 0.  If we wrap, we can also use the 32 instruction
+ * slots before the XTLB refill exception handler which belong to the
+ * unused TLB refill exception.
+ */
+#define MIPS64_REFILL_INSNS 32
+
 static void __cpuinit build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
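Why 32: the TLB refill vector sits at EBASE+0x000 and the XTLB refill vector at EBASE+0x080, so each exception slot is 0x80 bytes, i.e. 32 instructions. A 64-bit kernel runs from the XTLB vector and, as the comment above says, may wrap into the unused 32-bit slot, doubling the budget. The arithmetic, as a stand-alone check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned slot_bytes = 0x080 - 0x000;	/* vector spacing */
		unsigned insns = slot_bytes / sizeof(uint32_t);

		printf("slot = %u insns, budget with wrap = %u\n",
		       insns, 2 * insns);
		return 0;
	}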
@@ -680,12 +789,23 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif

+#ifdef CONFIG_HUGETLB_PAGE
+	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+#endif
+
 	build_get_ptep(&p, K0, K1);
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
 	uasm_l_leave(&l, p);
 	uasm_i_eret(&p); /* return from trap */

+#ifdef CONFIG_HUGETLB_PAGE
+	uasm_l_tlb_huge_update(&l, p);
+	UASM_i_LW(&p, K0, 0, K1);
+	build_huge_update_entries(&p, K0, K1);
+	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
+#endif
+
 #ifdef CONFIG_64BIT
 	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
 #endif
@@ -702,9 +822,10 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	if ((p - tlb_handler) > 64)
 		panic("TLB refill handler space exceeded");
 #else
-	if (((p - tlb_handler) > 63)
-	    || (((p - tlb_handler) > 61)
-		&& uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
+	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+		&& uasm_insn_has_bdelay(relocs,
+					tlb_handler + MIPS64_REFILL_INSNS - 3)))
 		panic("TLB refill handler space exceeded");
 #endif

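The rewritten bounds check is a pure renaming: with MIPS64_REFILL_INSNS = 32 the symbolic expressions reduce to the old literals 63, 61, and 29. A compile-time check (stand-alone C11):

	#define MIPS64_REFILL_INSNS 32

	_Static_assert(MIPS64_REFILL_INSNS * 2 - 1 == 63, "total budget");
	_Static_assert(MIPS64_REFILL_INSNS * 2 - 3 == 61, "budget w/ branch + delay slot");
	_Static_assert(MIPS64_REFILL_INSNS - 3 == 29, "delay-slot probe offset");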
@@ -717,39 +838,74 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
 	final_len = p - tlb_handler;
 #else /* CONFIG_64BIT */
-	f = final_handler + 32;
-	if ((p - tlb_handler) <= 32) {
+	f = final_handler + MIPS64_REFILL_INSNS;
+	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
 		/* Just copy the handler. */
 		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
 		final_len = p - tlb_handler;
 	} else {
-		u32 *split = tlb_handler + 30;
+#if defined(CONFIG_HUGETLB_PAGE)
+		const enum label_id ls = label_tlb_huge_update;
+#elif defined(MODULE_START)
+		const enum label_id ls = label_module_alloc;
+#else
+		const enum label_id ls = label_vmalloc;
+#endif
+		u32 *split;
+		int ov = 0;
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+			;
+		BUG_ON(i == ARRAY_SIZE(labels));
+		split = labels[i].addr;

 		/*
-		 * Find the split point.
+		 * See if we have overflown one way or the other.
 		 */
-		if (uasm_insn_has_bdelay(relocs, split - 1))
-			split--;
-
+		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+		    split < p - MIPS64_REFILL_INSNS)
+			ov = 1;
+
+		if (ov) {
+			/*
+			 * Split two instructions before the end.  One
+			 * for the branch and one for the instruction
+			 * in the delay slot.
+			 */
+			split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+			/*
+			 * If the branch would fall in a delay slot,
+			 * we must back up an additional instruction
+			 * so that it is no longer in a delay slot.
+			 */
+			if (uasm_insn_has_bdelay(relocs, split - 1))
+				split--;
+		}
 		/* Copy first part of the handler. */
 		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
 		f += split - tlb_handler;

-		/* Insert branch. */
-		uasm_l_split(&l, final_handler);
-		uasm_il_b(&f, &r, label_split);
-		if (uasm_insn_has_bdelay(relocs, split))
-			uasm_i_nop(&f);
-		else {
-			uasm_copy_handler(relocs, labels, split, split + 1, f);
-			uasm_move_labels(labels, f, f + 1, -1);
-			f++;
-			split++;
+		if (ov) {
+			/* Insert branch. */
+			uasm_l_split(&l, final_handler);
+			uasm_il_b(&f, &r, label_split);
+			if (uasm_insn_has_bdelay(relocs, split))
+				uasm_i_nop(&f);
+			else {
+				uasm_copy_handler(relocs, labels,
+						  split, split + 1, f);
+				uasm_move_labels(labels, f, f + 1, -1);
+				f++;
+				split++;
+			}
 		}

 		/* Copy the rest of the handler. */
 		uasm_copy_handler(relocs, labels, split, p, final_handler);
-		final_len = (f - (final_handler + 32)) + (p - split);
+		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+			    (p - split);
 	}
 #endif /* CONFIG_64BIT */

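The new split logic prefers a natural seam: it looks up the address that label_tlb_huge_update (or label_module_alloc / label_vmalloc) was bound to, and only when that address would leave either half larger than one 32-instruction slot does it fall back to the old behaviour of cutting two instructions before the end and stitching the halves with a branch. A condensed stand-alone sketch of the selection step (types and names stubbed, not the kernel's; the extra delay-slot back-off is omitted):

	#include <stddef.h>
	#include <stdint.h>

	#define REFILL_INSNS 32			/* one exception slot */

	struct label { int lab; uint32_t *addr; };

	/*
	 * Prefer the label address as the seam; force a split two
	 * instructions before the end of the first slot (room for the
	 * branch plus its delay slot) when either half would overflow.
	 * The kernel BUG()s if the label is missing; this sketch simply
	 * forces the fallback instead.
	 */
	static uint32_t *pick_split(const struct label *labels, size_t n,
				    int ls, uint32_t *handler, uint32_t *end,
				    int *ov)
	{
		uint32_t *split = NULL;
		size_t i;

		for (i = 0; i < n; i++) {
			if (labels[i].lab == ls) {
				split = labels[i].addr;
				break;
			}
		}
		*ov = !split || split > handler + REFILL_INSNS ||
		      split < end - REFILL_INSNS;
		if (*ov)
			split = handler + REFILL_INSNS - 2;
		return split;
	}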
@@ -782,7 +938,7 @@ u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

 static void __cpuinit
-iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
+iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 # ifdef CONFIG_64BIT_PHYS_ADDR
@@ -862,13 +1018,13 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * with it's original value.
  */
 static void __cpuinit
-build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 	uasm_il_bnez(p, r, pte, lid);
-	iPTE_LW(p, l, pte, ptr);
+	iPTE_LW(p, pte, ptr);
 }

 /* Make PTE valid, store result in PTR. */
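The andi/xori pair implements "branch unless all required bits are set": (pte & mask) ^ mask is zero exactly when every bit of mask is present, so bnez takes the slow path whenever one is missing. A stand-alone demonstration (the bit values here are illustrative, not the kernel's PTE layout):

	#include <assert.h>

	#define _PAGE_PRESENT	0x01	/* illustrative values only */
	#define _PAGE_READ	0x02

	static int pte_missing_bits(unsigned long pte)
	{
		unsigned long mask = _PAGE_PRESENT | _PAGE_READ;

		return ((pte & mask) ^ mask) != 0;	/* nonzero -> branch */
	}

	int main(void)
	{
		assert(!pte_missing_bits(_PAGE_PRESENT | _PAGE_READ));
		assert(pte_missing_bits(_PAGE_PRESENT));	/* READ missing */
		assert(pte_missing_bits(0));
		return 0;
	}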
@@ -886,13 +1042,13 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * restore PTE with value from PTR when done.
  */
 static void __cpuinit
-build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
 	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
 	uasm_il_bnez(p, r, pte, lid);
-	iPTE_LW(p, l, pte, ptr);
+	iPTE_LW(p, pte, ptr);
 }

 /* Make PTE writable, update software status bits as well, then store
@@ -913,12 +1069,12 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * restore PTE with value from PTR when done.
  */
 static void __cpuinit
-build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
 	uasm_il_beqz(p, r, pte, lid);
-	iPTE_LW(p, l, pte, ptr);
+	iPTE_LW(p, pte, ptr);
 }

 /*
@@ -994,7 +1150,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));

 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	uasm_i_nop(&p); /* load delay */
 	build_make_valid(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1024,7 +1180,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	memset(relocs, 0, sizeof(relocs));

 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1054,7 +1210,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 	memset(relocs, 0, sizeof(relocs));

 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1087,6 +1243,15 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
 #endif

+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * For huge tlb entries, pmd doesn't contain an address but
+	 * instead contains the tlb pte.  Check the PAGE_HUGE bit and
+	 * see if we need to jump to huge tlb processing.
+	 */
+	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+#endif
+
 	UASM_i_MFC0(p, pte, C0_BADVADDR);
 	UASM_i_LW(p, ptr, 0, ptr);
 	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
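The comment above captures the key data-structure point: for a huge mapping, the PMD slot holds the leaf PTE itself (with _PAGE_HUGE set) rather than a pointer to a PTE page, so one load plus an AND decides which path the generated handler takes. A stand-alone illustration (the bit value is a stand-in, not the kernel's layout):

	#include <assert.h>

	#define _PAGE_HUGE	0x10	/* stand-in bit, not the kernel's */

	/* A PMD slot is either a pointer to a PTE page (bit clear) or,
	 * for a huge mapping, the leaf PTE itself with _PAGE_HUGE set. */
	static int pmd_is_huge(unsigned long pmd_entry)
	{
		return (pmd_entry & _PAGE_HUGE) != 0;
	}

	int main(void)
	{
		assert(!pmd_is_huge(0x81234000));		/* PTE-page pointer */
		assert(pmd_is_huge(0x40000000 | _PAGE_HUGE));	/* huge leaf PTE */
		return 0;
	}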
@@ -1096,7 +1261,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 #ifdef CONFIG_SMP
 	uasm_l_smp_pgtable_change(l, *p);
 #endif
-	iPTE_LW(p, l, pte, ptr); /* get even pte */
+	iPTE_LW(p, pte, ptr); /* get even pte */
 	if (!m4kc_tlbp_war())
 		build_tlb_probe_entry(p);
 }
@@ -1138,12 +1303,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	}

 	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * This is the entry point when build_r4000_tlbchange_handler_head
+	 * spots a huge page.
+	 */
+	uasm_l_tlb_huge_update(&l, p);
+	iPTE_LW(&p, K0, K1);
+	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	build_tlb_probe_entry(&p);
+	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
+	build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
 	uasm_l_nopage_tlbl(&l, p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
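All three huge slow paths (here and in the store and modify handlers below) share one shape: re-load the PTE at the label, re-run the permission check, OR in the software status bits, then let build_huge_handler_tail() store the PTE back (via SC with a retry branch on SMP) and write an indexed TLB entry. Only the OR-ed bits differ, as this sketch summarizes (bit values illustrative, not the kernel's layout):

	#define _PAGE_VALID	0x02	/* illustrative values only */
	#define _PAGE_DIRTY	0x04
	#define _PAGE_ACCESSED	0x08
	#define _PAGE_MODIFIED	0x10

	/* tlbl sets accessed + valid; tlbs and tlbm additionally mark the
	 * page modified + dirty, matching the uasm_i_ori() operands. */
	static unsigned long huge_fixup_bits(unsigned long pte, int write)
	{
		pte |= _PAGE_ACCESSED | _PAGE_VALID;
		if (write)
			pte |= _PAGE_MODIFIED | _PAGE_DIRTY;
		return pte;
	}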
@@ -1169,12 +1347,26 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	memset(relocs, 0, sizeof(relocs));

 	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	build_make_write(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * This is the entry point when
+	 * build_r4000_tlbchange_handler_head spots a huge page.
+	 */
+	uasm_l_tlb_huge_update(&l, p);
+	iPTE_LW(&p, K0, K1);
+	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	build_tlb_probe_entry(&p);
+	uasm_i_ori(&p, K0, K0,
+		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+	build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
 	uasm_l_nopage_tlbs(&l, p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
@@ -1200,13 +1392,27 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	memset(relocs, 0, sizeof(relocs));

 	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	/* Present and writable bits set, set accessed and dirty bits. */
 	build_make_write(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * This is the entry point when
+	 * build_r4000_tlbchange_handler_head spots a huge page.
+	 */
+	uasm_l_tlb_huge_update(&l, p);
+	iPTE_LW(&p, K0, K1);
+	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	build_tlb_probe_entry(&p);
+	uasm_i_ori(&p, K0, K0,
+		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+	build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
 	uasm_l_nopage_tlbm(&l, p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);