Diffstat (limited to 'arch/mips/mm')
 -rw-r--r--  arch/mips/mm/cache.c   |  53
 -rw-r--r--  arch/mips/mm/fault.c   |  27
 -rw-r--r--  arch/mips/mm/init.c    |   2
 -rw-r--r--  arch/mips/mm/tlb-r4k.c |  19
 -rw-r--r--  arch/mips/mm/tlbex.c   | 169
 5 files changed, 220 insertions(+), 50 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index e716cafc346d..be8627bc5b02 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -137,22 +137,43 @@ EXPORT_SYMBOL_GPL(_page_cachable_default);
 
 static inline void setup_protection_map(void)
 {
-        protection_map[0] = PAGE_NONE;
-        protection_map[1] = PAGE_READONLY;
-        protection_map[2] = PAGE_COPY;
-        protection_map[3] = PAGE_COPY;
-        protection_map[4] = PAGE_READONLY;
-        protection_map[5] = PAGE_READONLY;
-        protection_map[6] = PAGE_COPY;
-        protection_map[7] = PAGE_COPY;
-        protection_map[8] = PAGE_NONE;
-        protection_map[9] = PAGE_READONLY;
-        protection_map[10] = PAGE_SHARED;
-        protection_map[11] = PAGE_SHARED;
-        protection_map[12] = PAGE_READONLY;
-        protection_map[13] = PAGE_READONLY;
-        protection_map[14] = PAGE_SHARED;
-        protection_map[15] = PAGE_SHARED;
+        if (kernel_uses_smartmips_rixi) {
+                protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+                protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+                protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+                protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+                protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+                protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+                protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+                protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+
+                protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+                protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
+                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+                protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
+                protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
+
+        } else {
+                protection_map[0] = PAGE_NONE;
+                protection_map[1] = PAGE_READONLY;
+                protection_map[2] = PAGE_COPY;
+                protection_map[3] = PAGE_COPY;
+                protection_map[4] = PAGE_READONLY;
+                protection_map[5] = PAGE_READONLY;
+                protection_map[6] = PAGE_COPY;
+                protection_map[7] = PAGE_COPY;
+                protection_map[8] = PAGE_NONE;
+                protection_map[9] = PAGE_READONLY;
+                protection_map[10] = PAGE_SHARED;
+                protection_map[11] = PAGE_SHARED;
+                protection_map[12] = PAGE_READONLY;
+                protection_map[13] = PAGE_READONLY;
+                protection_map[14] = PAGE_SHARED;
+                protection_map[15] = PAGE_SHARED;
+        }
 }
 
 void __cpuinit cpu_cache_init(void)
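Note on the new table: protection_map is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a mapping, so each RIXI entry above spells out which of _PAGE_NO_READ, _PAGE_NO_EXEC and _PAGE_WRITE apply to one mmap() protection combination. A minimal userspace sketch of the indexing follows; it is illustrative only, the VM_* values mirror the kernel's, and nothing in it is part of the patch.

/*
 * Illustration only: how an mmap protection combination selects a
 * protection_map slot.  Entry 5 (private PROT_READ|PROT_EXEC) is the
 * one left with neither _PAGE_NO_READ nor _PAGE_NO_EXEC set above.
 */
#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

int main(void)
{
        unsigned long flags = VM_READ | VM_EXEC;        /* private r-x mapping */

        printf("protection_map index = %lu\n",
               flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED));
        return 0;
}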
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e97a7a2fb2c0..b78f7d913ca4 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -99,8 +99,31 @@ good_area:
                 if (!(vma->vm_flags & VM_WRITE))
                         goto bad_area;
         } else {
-                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
-                        goto bad_area;
+                if (kernel_uses_smartmips_rixi) {
+                        if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
+#if 0
+                                pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
+                                          raw_smp_processor_id(),
+                                          current->comm, current->pid,
+                                          field, address, write,
+                                          field, regs->cp0_epc);
+#endif
+                                goto bad_area;
+                        }
+                        if (!(vma->vm_flags & VM_READ)) {
+#if 0
+                                pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
+                                          raw_smp_processor_id(),
+                                          current->comm, current->pid,
+                                          field, address, write,
+                                          field, regs->cp0_epc);
+#endif
+                                goto bad_area;
+                        }
+                } else {
+                        if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+                                goto bad_area;
+                }
         }
 
         /*
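In plain C, the new branch classifies read-type faults under RIXI as follows. This is a paraphrase only; the helper name is invented and the real code is the inline sequence above.

/*
 * Hypothetical helper (not in the patch) restating the check above:
 * with RIXI enabled, a read-type fault whose address equals the EPC on
 * a VMA without VM_EXEC is an execute-inhibit violation, and a fault
 * on a VMA without VM_READ is a read-inhibit violation; both end up in
 * bad_area like any other protection fault.
 */
static inline int rixi_access_ok(struct vm_area_struct *vma,
                                 unsigned long address, unsigned long epc)
{
        if (address == epc && !(vma->vm_flags & VM_EXEC))
                return 0;       /* XI violation */
        if (!(vma->vm_flags & VM_READ))
                return 0;       /* RI violation */
        return 1;
}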
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3c5b7de10af5..f34c26439a32 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -143,7 +143,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
         entrylo = pte.pte_high;
 #else
-        entrylo = pte_val(pte) >> 6;
+        entrylo = pte_to_entrylo(pte_val(pte));
 #endif
 
         ENTER_CRITICAL(flags);
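pte_to_entrylo() replaces the old hard-coded ">> 6" because, with RIXI, the software NO_READ/NO_EXEC bits must land in the top of EntryLo (the hardware RI/XI bits) instead of being shifted out. A rough C model of the shift-and-rotate the patch emits elsewhere (see build_convert_pte_to_entrylo in tlbex.c below) is shown here; it is an illustration assuming 64-bit PTE values, not the kernel's actual helper, which lives in asm/pgtable.h.

/*
 * Rough model only: shift _PAGE_NO_EXEC down to bit 0, then rotate
 * right the remaining distance to _PAGE_GLOBAL so that the inhibit
 * bits wrap around into the top of the register, where the hardware
 * expects XI/RI.  Assumes 0 < global_shift - no_exec_shift < 64.
 */
static inline unsigned long long model_pte_to_entrylo(unsigned long long pte,
                                                      unsigned int no_exec_shift,
                                                      unsigned int global_shift)
{
        unsigned long long v = pte >> no_exec_shift;    /* UASM_i_SRL  */
        unsigned int r = global_shift - no_exec_shift;  /* UASM_i_ROTR */

        return (v >> r) | (v << (64 - r));
}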
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 21d04dfa11db..c618eed933a1 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -303,7 +303,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
                 unsigned long lo;
                 write_c0_pagemask(PM_HUGE_MASK);
                 ptep = (pte_t *)pmdp;
-                lo = pte_val(*ptep) >> 6;
+                lo = pte_to_entrylo(pte_val(*ptep));
                 write_c0_entrylo0(lo);
                 write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 
@@ -323,8 +323,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
                 ptep++;
                 write_c0_entrylo1(ptep->pte_high);
 #else
-                write_c0_entrylo0(pte_val(*ptep++) >> 6);
-                write_c0_entrylo1(pte_val(*ptep) >> 6);
+                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
+                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
 #endif
                 mtc0_tlbw_hazard();
                 if (idx < 0)
@@ -437,6 +437,19 @@ void __cpuinit tlb_init(void)
             current_cpu_type() == CPU_R12000 ||
             current_cpu_type() == CPU_R14000)
                 write_c0_framemask(0);
+
+        if (kernel_uses_smartmips_rixi) {
+                /*
+                 * Enable the no read, no exec bits, and enable large virtual
+                 * address.
+                 */
+                u32 pg = PG_RIE | PG_XIE;
+#ifdef CONFIG_64BIT
+                pg |= PG_ELPA;
+#endif
+                write_c0_pagegrain(pg);
+        }
+
         temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
         /* From this point on the ARC firmware is dead. */
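tlb_init() now turns on the Read-Inhibit and eXecute-Inhibit enables (plus PG_ELPA on 64-bit) in the PageGrain register. A possible bring-up check, not part of the patch and with an invented helper name, would read PageGrain back and confirm the enables latched:

/*
 * Debugging sketch only: verify that the PageGrain write above stuck.
 * Uses the same PG_RIE/PG_XIE masks and the standard mipsregs.h
 * accessor read_c0_pagegrain().
 */
static void __cpuinit check_rixi_pagegrain(void)
{
        if (!kernel_uses_smartmips_rixi)
                return;

        if ((read_c0_pagegrain() & (PG_RIE | PG_XIE)) != (PG_RIE | PG_XIE))
                pr_notice("PageGrain RIE/XIE did not latch; RIXI unavailable\n");
}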
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4a2907c59569..0de0e4127d66 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -76,6 +76,8 @@ enum label_id {
         label_vmalloc_done,
         label_tlbw_hazard,
         label_split,
+        label_tlbl_goaround1,
+        label_tlbl_goaround2,
         label_nopage_tlbl,
         label_nopage_tlbs,
         label_nopage_tlbm,
@@ -92,6 +94,8 @@ UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
@@ -396,36 +400,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
         }
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-                                                 struct uasm_label **l,
-                                                 struct uasm_reloc **r,
-                                                 unsigned int tmp,
-                                                 enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+                                                                  unsigned int reg)
 {
-        /* Set huge page tlb entry size */
-        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+        if (kernel_uses_smartmips_rixi) {
+                UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+                UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+        } else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+                uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+                UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+        }
+}
 
-        build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+                                             struct uasm_reloc **r,
+                                             unsigned int tmp,
+                                             enum label_id lid)
+{
         /* Reset default page size */
         if (PM_DEFAULT_MASK >> 16) {
                 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
                 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-                uasm_il_b(p, r, label_leave);
+                uasm_il_b(p, r, lid);
                 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
         } else if (PM_DEFAULT_MASK) {
                 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-                uasm_il_b(p, r, label_leave);
+                uasm_il_b(p, r, lid);
                 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
         } else {
-                uasm_il_b(p, r, label_leave);
+                uasm_il_b(p, r, lid);
                 uasm_i_mtc0(p, 0, C0_PAGEMASK);
         }
 }
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+                                                 struct uasm_label **l,
+                                                 struct uasm_reloc **r,
+                                                 unsigned int tmp,
+                                                 enum tlb_write_entry wmode)
+{
+        /* Set huge page tlb entry size */
+        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+        build_tlb_write_entry(p, l, r, wmode);
+
+        build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
@@ -459,7 +487,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
         if (!small_sequence)
                 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-        UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+        build_convert_pte_to_entrylo(p, pte);
         UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
         /* convert to entrylo1 */
         if (small_sequence)
@@ -685,9 +713,17 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
         if (cpu_has_64bits) {
                 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-                uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-                uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
+                if (kernel_uses_smartmips_rixi) {
+                        UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+                        UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+                        UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                        UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                } else {
+                        uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                        uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+                }
                 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
         } else {
                 int pte_off_even = sizeof(pte_t) / 2;
@@ -704,13 +740,23 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                 UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                 if (r45k_bvahwbug())
                         build_tlb_probe_entry(p);
-                UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-                if (r4k_250MHZhwbug())
-                        UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-                UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-                if (r45k_bvahwbug())
-                        uasm_i_mfc0(p, tmp, C0_INDEX);
+                if (kernel_uses_smartmips_rixi) {
+                        UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+                        UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+                        UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                        if (r4k_250MHZhwbug())
+                                UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                        UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                } else {
+                        UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+                        if (r4k_250MHZhwbug())
+                                UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+                        UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+                        if (r45k_bvahwbug())
+                                uasm_i_mfc0(p, tmp, C0_INDEX);
+                }
                 if (r4k_250MHZhwbug())
                         UASM_i_MTC0(p, 0, C0_ENTRYLO1);
                 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
@@ -986,9 +1032,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-        uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-        uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-        uasm_il_bnez(p, r, pte, lid);
+        if (kernel_uses_smartmips_rixi) {
+                uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+                uasm_il_beqz(p, r, pte, lid);
+        } else {
+                uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+                uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+                uasm_il_bnez(p, r, pte, lid);
+        }
         iPTE_LW(p, pte, ptr);
 }
 
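With RIXI the page-table encoding carries _PAGE_NO_READ instead of a positive _PAGE_READ bit, so "present" can no longer be tested as PRESENT|READ. A hedged C rendering of the two sequences emitted above (illustration only; the helper is invented and the real test is generated with uasm):

/* Illustration only: what the generated andi/xori/branch sequences test. */
static inline int pte_not_present(unsigned long pte)
{
        if (kernel_uses_smartmips_rixi)
                return !(pte & _PAGE_PRESENT);                  /* andi; beqz */

        return (pte & (_PAGE_PRESENT | _PAGE_READ)) !=
               (_PAGE_PRESENT | _PAGE_READ);                    /* andi; xori; bnez */
}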
@@ -1273,6 +1324,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
         build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
         if (m4kc_tlbp_war())
                 build_tlb_probe_entry(&p);
+
+        if (kernel_uses_smartmips_rixi) {
+                /*
+                 * If the page is not _PAGE_VALID, RI or XI could not
+                 * have triggered it.  Skip the expensive test..
+                 */
+                uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+                uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+                uasm_i_nop(&p);
+
+                uasm_i_tlbr(&p);
+                /* Examine entrylo 0 or 1 based on ptr. */
+                uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+                uasm_i_beqz(&p, K0, 8);
+
+                UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+                UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+                /*
+                 * If the entryLo (now in K0) is valid (bit 1), RI or
+                 * XI must have triggered it.
+                 */
+                uasm_i_andi(&p, K0, K0, 2);
+                uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+                uasm_l_tlbl_goaround1(&l, p);
+                /* Reload the PTE value */
+                iPTE_LW(&p, K0, K1);
+        }
         build_make_valid(&p, &r, K0, K1);
         build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
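What the generated block above does, paraphrased in C (illustration only; the real code is emitted with uasm and runs in the exception path on K0/K1, and the helper below is invented): if the PTE is already valid yet a TLBL was taken, the cause must be an RI or XI violation, confirmed by re-reading the TLB entry and testing the V bit of whichever EntryLo half covers the faulting page.

/*
 * Illustration only: C paraphrase of the fast-path test above, using
 * the standard mipsregs.h accessors.
 */
static int tlbl_was_rixi(unsigned long pte, unsigned long ptep_addr)
{
        unsigned long lo;

        if (!(pte & _PAGE_VALID))
                return 0;                       /* ordinary invalid-PTE case */

        tlb_read();                             /* tlbr fills EntryLo0/EntryLo1 */
        if (ptep_addr & sizeof(pte_t))          /* odd PTE -> EntryLo1 */
                lo = read_c0_entrylo1();
        else
                lo = read_c0_entrylo0();

        return lo & 2;                          /* EntryLo V bit => RI/XI fault */
}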
@@ -1285,6 +1364,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
         iPTE_LW(&p, K0, K1);
         build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
         build_tlb_probe_entry(&p);
+
+        if (kernel_uses_smartmips_rixi) {
+                /*
+                 * If the page is not _PAGE_VALID, RI or XI could not
+                 * have triggered it.  Skip the expensive test..
+                 */
+                uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+                uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+                uasm_i_nop(&p);
+
+                uasm_i_tlbr(&p);
+                /* Examine entrylo 0 or 1 based on ptr. */
+                uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+                uasm_i_beqz(&p, K0, 8);
+
+                UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+                UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+                /*
+                 * If the entryLo (now in K0) is valid (bit 1), RI or
+                 * XI must have triggered it.
+                 */
+                uasm_i_andi(&p, K0, K0, 2);
+                uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+                /* Reload the PTE value */
+                iPTE_LW(&p, K0, K1);
+
+                /*
+                 * We clobbered C0_PAGEMASK, restore it.  On the other branch
+                 * it is restored in build_huge_tlb_write_entry.
+                 */
+                build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+                uasm_l_tlbl_goaround2(&l, p);
+        }
         uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
         build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif