author     David Daney <ddaney@caviumnetworks.com>  2010-04-28 15:16:18 -0400
committer  Ralf Baechle <ralf@linux-mips.org>       2010-04-30 15:52:48 -0400
commit     1ec56329ff939aba29291c0dec1a28ceed660162
tree       45788e1f4f0baef44d727e7ca31821c16ba6317f /arch
parent     3be6022c27ace1e3b4ba963e7ffd2e3b60cecd8a

MIPS: Check for accesses beyond the end of the PGD.
For some combinations of PAGE_SIZE and vmbits, it is possible for userspace
accesses to lie beyond what is covered by the PGD while still being within
vmbits. Such an access would cause the TLB refill handler to load garbage
values for the PMD and PTE, potentially giving userspace access to parts of
the physical address space to which it is not entitled.
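
To see how the gap arises, consider a common MIPS64 configuration (the
macro values below are assumptions for illustration, not part of this
patch): with 4 KiB pages, PGDIR_SHIFT is 30, PGD_ORDER is 1, and
PAGE_SHIFT is 12, so the PGD covers 30 + 1 + 12 - 3 = 40 bits of user
virtual space, while a core whose vmbits is, say, 48 lets userspace
generate xuseg addresses well above 2^40. A minimal sketch of the
arithmetic:

    /*
     * Illustrative values for a MIPS64 kernel with 4 KiB pages; they
     * are assumptions for this sketch, not taken from the patch.
     */
    #include <stdio.h>

    #define PAGE_SHIFT  12  /* 4 KiB pages */
    #define PGD_ORDER    1  /* the PGD occupies two pages */
    #define PGDIR_SHIFT 30  /* bytes mapped per PGD entry: 1 GiB */

    int main(void)
    {
        /* Bits of user virtual space the PGD can actually map. */
        int pgd_bits = PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3;
        int vmbits = 48;  /* assumed CPU virtual-address width */

        printf("PGD covers %d bits, first unmapped address %#llx\n",
               pgd_bits, 1ULL << pgd_bits);
        if (vmbits > pgd_bits)
            printf("userspace can fault beyond the PGD\n");
        return 0;
    }

Any access in [2^40, 2^vmbits) indexes past the last PGD entry, which is
exactly the case the patch guards against.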
In the TLB refill hot path, we add a single dsrl instruction so we can
check whether any bits outside of the range covered by the PGD are set. On
the vmalloc side we then separate the bad case from the normal vmalloc
case and call tlb_do_page_fault_0 if warranted. This slows us down a
bit, but has the benefit of yielding deterministic behavior.
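
A rough C model of the decision that the emitted dsrl/bnez pair makes
(PGD_COVERED_BITS is an illustrative stand-in for PGDIR_SHIFT +
PGD_ORDER + PAGE_SHIFT - 3, not a kernel macro):

    #include <stdint.h>

    #define PGD_COVERED_BITS 40  /* assumed, per the example above */

    static int take_slow_path(uint64_t badvaddr)
    {
        /*
         * One logical right shift: any bits left over mean the
         * faulting address is either negative (a genuine kernel or
         * vmalloc access) or an xuseg address beyond the end of the
         * PGD.  Neither may be refilled from the user page tables.
         */
        return (badvaddr >> PGD_COVERED_BITS) != 0;
    }

The vmalloc stub then tells the two cases apart by sign: a negative
BadVAddr is handled as a real vmalloc access, while a non-negative one
falls through to label_large_segbits_fault and is sent to
tlb_do_page_fault_0.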
[Ralf: Fixed build error for 32-bit kernels.]
[Ralf: Folded lmo commit c8c0e22b2aa3982852b44279638ef37f9aa31b7d into this
commit.]
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/1152/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
Diffstat (limited to 'arch')

 arch/mips/mm/tlbex.c | 110
 1 file changed, 87 insertions(+), 23 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 61374b2c930d..86f004dc8355 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -31,6 +31,16 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 
+/*
+ * TLB load/store/modify handlers.
+ *
+ * Only the fastpath gets synthesized at runtime, the slowpath for
+ * do_page_fault remains normal asm.
+ */
+extern void tlb_do_page_fault_0(void);
+extern void tlb_do_page_fault_1(void);
+
+
 static inline int r45k_bvahwbug(void)
 {
 	/* XXX: We should probe for the presence of this bug, but we don't. */
@@ -83,6 +93,7 @@ enum label_id {
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+	label_large_segbits_fault,
 #ifdef CONFIG_HUGETLB_PAGE
 	label_tlb_huge_update,
 #endif
@@ -101,6 +112,7 @@ UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+UASM_L_LA(_large_segbits_fault)
 #ifdef CONFIG_HUGETLB_PAGE
 UASM_L_LA(_tlb_huge_update)
 #endif
@@ -157,6 +169,10 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
+#ifdef CONFIG_64BIT
+static int check_for_high_segbits __cpuinitdata;
+#endif
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 /*
  * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
@@ -532,7 +548,24 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-	uasm_il_bltz(p, r, tmp, label_vmalloc);
+
+	if (check_for_high_segbits) {
+		/*
+		 * The kernel currently implicitly assumes that the
+		 * MIPS SEGBITS parameter for the processor is
+		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
+		 * allocate virtual addresses outside the maximum
+		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
+		 * that doesn't prevent user code from accessing the
+		 * higher xuseg addresses.  Here, we make sure that
+		 * everything but the lower xuseg addresses goes down
+		 * the module_alloc/vmalloc path.
+		 */
+		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_bnez(p, r, ptr, label_vmalloc);
+	} else {
+		uasm_il_bltz(p, r, tmp, label_vmalloc);
+	}
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -583,28 +616,64 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 #endif
 }
 
+enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-			unsigned int bvaddr, unsigned int ptr)
+			unsigned int bvaddr, unsigned int ptr,
+			enum vmalloc64_mode mode)
 {
 	long swpd = (long)swapper_pg_dir;
+	int single_insn_swpd;
+	int did_vmalloc_branch = 0;
+
+	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 
 	uasm_l_vmalloc(l, *p);
 
-	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, swpd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(swpd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+	if (mode == refill && check_for_high_segbits) {
+		if (single_insn_swpd) {
+			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+			did_vmalloc_branch = 1;
+			/* fall through */
+		} else {
+			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+		}
+	}
+	if (!did_vmalloc_branch) {
+		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+			uasm_il_b(p, r, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+		} else {
+			UASM_i_LA_mostly(p, ptr, swpd);
+			uasm_il_b(p, r, label_vmalloc_done);
+			if (uasm_in_compat_space_p(swpd))
+				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+			else
+				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+		}
+	}
+	if (mode == refill && check_for_high_segbits) {
+		uasm_l_large_segbits_fault(l, *p);
+		/*
+		 * We get here if we are an xsseg address, or if we are
+		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
+		 *
+		 * Ignoring xsseg (assume disabled, so it would generate
+		 * address errors), the only remaining possibility
+		 * is the upper xuseg addresses.  On processors with
+		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
+		 * addresses would have taken an address error. We try
+		 * to mimic that here by taking a load/istream page
+		 * fault.
+		 */
+		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(p, ptr);
+		uasm_i_nop(p);
 	}
 }
 
@@ -823,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 #endif
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
 #endif
 
 	/*
@@ -933,15 +1002,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 }
 
 /*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
-/*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.
  */
@@ -1300,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }
 
@@ -1524,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
 	 */
 	static int run_once = 0;
 
+#ifdef CONFIG_64BIT
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
 	switch (current_cpu_type()) {
 	case CPU_R2000:
 	case CPU_R3000: