Diffstat (limited to 'arch/mips/mm/tlbex.c')
 arch/mips/mm/tlbex.c | 366 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 283 insertions(+), 83 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index bb1719a55d22..86f004dc8355 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,8 +29,17 @@
 
 #include <asm/mmu_context.h>
 #include <asm/war.h>
+#include <asm/uasm.h>
+
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
 
-#include "uasm.h"
 
 static inline int r45k_bvahwbug(void)
 {
@@ -73,18 +82,18 @@ static int __cpuinit m4kc_tlbp_war(void)
 enum label_id {
 	label_second_part = 1,
 	label_leave,
-#ifdef MODULE_START
-	label_module_alloc,
-#endif
 	label_vmalloc,
 	label_vmalloc_done,
 	label_tlbw_hazard,
 	label_split,
+	label_tlbl_goaround1,
+	label_tlbl_goaround2,
 	label_nopage_tlbl,
 	label_nopage_tlbs,
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+	label_large_segbits_fault,
 #ifdef CONFIG_HUGETLB_PAGE
 	label_tlb_huge_update,
 #endif
@@ -92,18 +101,18 @@ enum label_id {
 
 UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
-#ifdef MODULE_START
-UASM_L_LA(_module_alloc)
-#endif
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
 #ifdef CONFIG_HUGETLB_PAGE
 UASM_L_LA(_tlb_huge_update)
 #endif
@@ -160,6 +169,16 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
+#ifdef CONFIG_64BIT
+static int check_for_high_segbits __cpuinitdata;
+#endif
+
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+/*
+ * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
+ * we cannot do r3000 under these circumstances.
+ */
+
 /*
  * The R3000 TLB handler is simple.
  */
@@ -199,6 +218,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
 
 	dump_handler((u32 *)ebase, 32);
 }
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
  * The R4000 TLB handler is much more complicated. We have two
@@ -396,36 +416,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+						  unsigned int reg)
 {
-	/* Set huge page tlb entry size */
-	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+	}
+}
 
-	build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+					     struct uasm_reloc **r,
+					     unsigned int tmp,
+					     enum label_id lid)
+{
 	/* Reset default page size */
 	if (PM_DEFAULT_MASK >> 16) {
 		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else if (PM_DEFAULT_MASK) {
 		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else {
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, 0, C0_PAGEMASK);
 	}
 }
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
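Note on the hunk above: build_convert_pte_to_entrylo() is the core of the SmartMIPS RI/XI change, and the same SRL-then-ROTR pairing reappears in build_update_entries() further down. The following is a hedged userspace C restatement of the bit trick only; the flag bit positions and the rotr32() helper are invented for illustration and are not the real asm/pgtable-bits.h values.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative software-PTE layout; the real values live in asm/pgtable-bits.h. */
	#define _PAGE_NO_EXEC	(1u << 2)	/* should end up as EntryLo XI (bit 30) */
	#define _PAGE_NO_READ	(1u << 3)	/* should end up as EntryLo RI (bit 31) */
	#define _PAGE_GLOBAL	(1u << 4)	/* should end up as EntryLo G  (bit 0)  */

	static uint32_t rotr32(uint32_t v, unsigned int n)
	{
		return (v >> n) | (v << (32 - n));
	}

	static uint32_t pte_to_entrylo(uint32_t pte)
	{
		pte >>= 2;	/* SRL by ilog2(_PAGE_NO_EXEC): NO_EXEC now at bit 0 */
		/* ROTR by ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC):
		 * G lands at bit 0, NO_EXEC/NO_READ wrap to bits 30/31 */
		return rotr32(pte, 4 - 2);
	}

	int main(void)
	{
		uint32_t pte = _PAGE_GLOBAL | _PAGE_NO_EXEC;
		printf("entrylo = 0x%08x\n", pte_to_entrylo(pte)); /* 0x40000001: G and XI set */
		return 0;
	}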
@@ -459,15 +503,15 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
-	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, pte);
+	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
 		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 	else
 		UASM_i_ADDU(p, pte, pte, tmp);
 
-	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
 static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -497,30 +541,56 @@ static void __cpuinit
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
-
+#endif
 	/*
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-	uasm_il_bltz(p, r, tmp, label_vmalloc);
+
+	if (check_for_high_segbits) {
+		/*
+		 * The kernel currently implicitely assumes that the
+		 * MIPS SEGBITS parameter for the processor is
+		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+		 * allocate virtual addresses outside the maximum
+		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+		 * that doesn't prevent user code from accessing the
+		 * higher xuseg addresses.  Here, we make sure that
+		 * everything but the lower xuseg addresses goes down
+		 * the module_alloc/vmalloc path.
+		 */
+		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, ptr, label_vmalloc);
+	} else {
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+	/*
+	 * &pgd << 11 stored in CONTEXT [23..63].
+	 */
+	UASM_i_MFC0(p, ptr, C0_CONTEXT);
+	uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
+	uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */
+	uasm_i_drotr(p, ptr, ptr, 11);
+#elif defined(CONFIG_SMP)
 # ifdef CONFIG_MIPS_MT_SMTC
 	/*
 	 * SMTC uses TCBind value as "CPU" index
 	 */
 	uasm_i_mfc0(p, ptr, C0_TCBIND);
-	uasm_i_dsrl(p, ptr, ptr, 19);
+	uasm_i_dsrl_safe(p, ptr, ptr, 19);
 # else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-	uasm_i_dsrl(p, ptr, ptr, 23);
-#endif
+	uasm_i_dsrl_safe(p, ptr, ptr, 23);
+# endif
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_daddu(p, ptr, ptr, tmp);
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
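Note on the hunk above: the new dsrl_safe/bnez pair is just a range test. If any address bit at or above (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3) is set, one PGD's worth of entries cannot map the address and it must take the vmalloc path. A hedged C restatement follows; the three constants are assumed defaults for a 64-bit 4K-page build, not values taken from this patch.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assumed 4K pages */
	#define PGD_ORDER	1	/* assumed two-page PGD */
	#define PGDIR_SHIFT	30	/* assumed 3-level 4K layout */

	/* Nonzero iff badvaddr lies above what one PGD can map (2^40 bytes
	 * with the assumed constants), i.e. it must take the vmalloc path. */
	static int above_pgd_range(uint64_t badvaddr)
	{
		return (badvaddr >> (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3)) != 0;
	}

	int main(void)
	{
		printf("%d %d\n", above_pgd_range(1ULL << 39),	/* 0: mappable */
				  above_pgd_range(1ULL << 43));	/* 1: high xuseg */
		return 0;
	}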
@@ -532,42 +602,78 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_l_vmalloc_done(l, *p);
 
-	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
-		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-	else
-		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+	/* get pgd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
 }
 
+enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-			unsigned int bvaddr, unsigned int ptr)
+			unsigned int bvaddr, unsigned int ptr,
+			enum vmalloc64_mode mode)
 {
 	long swpd = (long)swapper_pg_dir;
+	int single_insn_swpd;
+	int did_vmalloc_branch = 0;
+
+	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 
 	uasm_l_vmalloc(l, *p);
 
-	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, swpd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(swpd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+	if (mode == refill && check_for_high_segbits) {
+		if (single_insn_swpd) {
+			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+			did_vmalloc_branch = 1;
+			/* fall through */
+		} else {
+			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+		}
+	}
+	if (!did_vmalloc_branch) {
+		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+			uasm_il_b(p, r, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+		} else {
+			UASM_i_LA_mostly(p, ptr, swpd);
+			uasm_il_b(p, r, label_vmalloc_done);
+			if (uasm_in_compat_space_p(swpd))
+				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+			else
+				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+		}
+	}
+	if (mode == refill && check_for_high_segbits) {
+		uasm_l_large_segbits_fault(l, *p);
+		/*
+		 * We get here if we are an xsseg address, or if we are
+		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+		 *
+		 * Ignoring xsseg (assume disabled so would generate
+		 * (address errors?), the only remaining possibility
+		 * is the upper xuseg addresses.  On processors with
+		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+		 * addresses would have taken an address error. We try
+		 * to mimic that here by taking a load/istream page
+		 * fault.
+		 */
+		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(p, ptr);
+		uasm_i_nop(p);
 	}
 }
 
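Note on the hunk above: the refill-mode dispatch is easier to follow as plain C. The sketch below only models the run-time decision the emitted branches make; it collapses the swapper_pg_dir materialization and the branch-delay-slot details, and is not the emitted code.

	#include <stdint.h>
	#include <stdio.h>

	enum vmalloc64_mode { not_refill, refill };

	/* Roughly the decision the emitted branches make at run time for an
	 * address that failed the user-PGD range check. */
	static const char *vmalloc_dispatch(uint64_t bvaddr, enum vmalloc64_mode mode,
					    int check_for_high_segbits)
	{
		if (mode == refill && check_for_high_segbits && (int64_t)bvaddr >= 0)
			/* high xuseg/xsseg: mimic an address error via a page fault */
			return "jump to tlb_do_page_fault_0 (large_segbits_fault)";
		/* negative, i.e. kernel-segment, addresses: genuine vmalloc area */
		return "fall through to vmalloc_done with swapper_pg_dir";
	}

	int main(void)
	{
		puts(vmalloc_dispatch(0xc000000000000000ull, refill, 1)); /* vmalloc */
		puts(vmalloc_dispatch(0x0000100000000000ull, refill, 1)); /* fault */
		return 0;
	}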
@@ -674,35 +780,53 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		if (kernel_uses_smartmips_rixi) {
+			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		} else {
+			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		}
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 
 		/* The pte entries are pre-shifted */
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-	if (r45k_bvahwbug())
-		uasm_i_mfc0(p, tmp, C0_INDEX);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		if (r45k_bvahwbug())
+			uasm_i_mfc0(p, tmp, C0_INDEX);
+	}
 	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
@@ -731,10 +855,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	 * create the plain linear handler
 	 */
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
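Note on the hunk above: the BCM1250 M3 workaround compare is widened to 64 bits and now masks out both the page-offset/ASID bits below 12 + 1 and the unimplemented hole between segbits and the two region bits. A hedged C model of what the five emitted instructions compute (segbits fixed at 44 as in the patch; the test address is made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Nonzero when BadVAddr and EntryHi agree on region bits [63:62] and
	 * VPN bits [segbits-1:13]; everything else is masked out. */
	static int vpn_matches(uint64_t badvaddr, uint64_t entryhi)
	{
		const unsigned int segbits = 44;		/* as in the patch */
		uint64_t x = badvaddr ^ entryhi;
		uint64_t region = x >> 62;			/* dsrl_safe k1, k0, 62 */
		uint64_t vpn = (x >> 13) << (64 + 13 - segbits); /* dsrl_safe; dsll_safe */
		return (vpn | region) == 0;	/* generated code branches out on mismatch */
	}

	int main(void)
	{
		uint64_t va = 0xc0000000deadb000ull;	/* made-up kernel-segment address */
		printf("%d %d\n", vpn_matches(va, va),		/* 1: same page pair */
				  vpn_matches(va, va ^ (1ull << 20)));	/* 0: VPN differs */
		return 0;
	}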
@@ -763,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
 #endif
 
 	/*
@@ -802,8 +931,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	} else {
 #if defined(CONFIG_HUGETLB_PAGE)
 		const enum label_id ls = label_tlb_huge_update;
-#elif defined(MODULE_START)
-		const enum label_id ls = label_module_alloc;
 #else
 		const enum label_id ls = label_vmalloc;
 #endif
@@ -875,15 +1002,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 }
 
 /*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.
  */
@@ -977,9 +1095,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_il_bnez(p, r, pte, lid);
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+		uasm_il_beqz(p, r, pte, lid);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, pte, lid);
+	}
 	iPTE_LW(p, pte, ptr);
 }
 
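Note on the hunk above: there are now two presence predicates. With RI/XI the hardware enforces readability, so the fast path only has to check _PAGE_PRESENT; without it, the andi/xori/bnez triple faults unless both PRESENT and READ are set. A small C restatement with illustrative flag positions:

	#include <stdio.h>

	#define _PAGE_PRESENT	(1u << 0)	/* illustrative positions */
	#define _PAGE_READ	(1u << 1)

	/* andi; xori; bnez: fault unless both PRESENT and READ are set */
	static int faults_legacy(unsigned int pte)
	{
		return ((pte & (_PAGE_PRESENT | _PAGE_READ)) ^
			(_PAGE_PRESENT | _PAGE_READ)) != 0;
	}

	/* andi; beqz: the RI hardware bit enforces readability, so PRESENT suffices */
	static int faults_rixi(unsigned int pte)
	{
		return (pte & _PAGE_PRESENT) == 0;
	}

	int main(void)
	{
		unsigned int pte = _PAGE_PRESENT;	/* present, not readable */
		printf("legacy=%d rixi=%d\n", faults_legacy(pte), faults_rixi(pte));
		return 0;
	}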
@@ -1033,6 +1156,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 	iPTE_LW(p, pte, ptr);
 }
 
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 /*
  * R3000 style TLB load/store/modify handlers.
  */
@@ -1184,6 +1308,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 
 	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
 /*
  * R4000 style TLB load/store/modify handlers.
@@ -1235,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }
 
@@ -1250,10 +1375,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	if (bcm1250_m3_war()) {
-		UASM_i_MFC0(&p, K0, C0_BADVADDR);
-		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
+		unsigned int segbits = 44;
+
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
+		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
 	}
@@ -1262,6 +1392,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it.  Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround1(&l, p);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
@@ -1274,6 +1432,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	iPTE_LW(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it.  Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+
+		/*
+		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
+		 * it is restored in build_huge_tlb_write_entry.
+		 */
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround2(&l, p);
+	}
 	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
 	build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif
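Note on the two RI/XI hunks above: after the tlbr, the andi K0, K1, sizeof(pte_t) / beqz ..., 8 pair uses the PTE pointer's even/odd slot bit to skip one of the two MFC0s, and the EntryLo V bit (bit 1) then distinguishes an RI/XI-triggered TLBL from a genuinely invalid page. A hedged userspace model of that selection logic:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pte_t;			/* assumed 64-bit PTEs */

	/* The faulting PTE sits in an even/odd pair; its pointer's
	 * sizeof(pte_t) bit says which EntryLo the tlbr refilled for it. */
	static uint64_t pick_entrylo(const pte_t *ptep, uint64_t lo0, uint64_t lo1)
	{
		return ((uintptr_t)ptep & sizeof(pte_t)) ? lo1 : lo0;
	}

	/* EntryLo bit 1 is V: a valid entry means only RI/XI explains the TLBL */
	static int ri_xi_fault(uint64_t entrylo)
	{
		return (entrylo & 2) != 0;
	}

	int main(void)
	{
		pte_t pair[2] = { 0, 0 };
		/* odd slot with a valid EntryLo1 -> must be RI/XI: prints 1 */
		printf("%d\n", ri_xi_fault(pick_entrylo(&pair[1], 0, 2)));
		return 0;
	}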
@@ -1392,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
 	 */
 	static int run_once = 0;
 
+#ifdef CONFIG_64BIT
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
 	switch (current_cpu_type()) {
 	case CPU_R2000:
 	case CPU_R3000:
@@ -1400,6 +1596,7 @@ void __cpuinit build_tlb_refill_handler(void)
 	case CPU_TX3912:
 	case CPU_TX3922:
 	case CPU_TX3927:
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 		build_r3000_tlb_refill_handler();
 		if (!run_once) {
 			build_r3000_tlb_load_handler();
@@ -1407,6 +1604,9 @@ void __cpuinit build_tlb_refill_handler(void)
 			build_r3000_tlb_modify_handler();
 			run_once++;
 		}
+#else
+		panic("No R3000 TLB refill handler");
+#endif
 		break;
 
 	case CPU_R6000: