author		David Daney <ddaney@caviumnetworks.com>	2010-04-28 15:16:17 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2010-04-30 15:52:47 -0400
commit		3be6022c27ace1e3b4ba963e7ffd2e3b60cecd8a (patch)
tree		617178ac2ee9395e609aef3899b56756fb701cbb /arch/mips
parent		26b9e547e90db6b8b409084a9d4501124ff492b3 (diff)
MIPS: Use uasm_i_ds{r,l}l_safe() instead of uasm_i_ds{r,l}l() in tlbex.c
This makes the code somewhat cleaner while reducing the risk of shift
amount overflows when various page table related options are changed.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/1154/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
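The uasm_i_ds{r,l}l_safe() helpers this patch switches to come from the parent
commit and are not shown in this diff. As a minimal sketch (assuming only the
existing uasm_i_dsrl()/uasm_i_dsrl32()/uasm_i_dsll()/uasm_i_dsll32() emitters,
whose MIPS shift-amount field encodes 0..31), each wrapper dispatches on the
shift amount and picks the *32 instruction form for shifts of 32 or more, so
the range check no longer has to be open-coded at every call site:

	/* Sketch only, not the verbatim helpers from the parent commit. */
	static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
					    unsigned int a2, unsigned int a3)
	{
		if (a3 < 32)
			uasm_i_dsrl(p, a1, a2, a3);		/* DSRL: sa in 0..31 */
		else
			uasm_i_dsrl32(p, a1, a2, a3 - 32);	/* DSRL32: sa in 32..63 */
	}

	static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
					    unsigned int a2, unsigned int a3)
	{
		if (a3 < 32)
			uasm_i_dsll(p, a1, a2, a3);		/* DSLL: sa in 0..31 */
		else
			uasm_i_dsll32(p, a1, a2, a3 - 32);	/* DSLL32: sa in 32..63 */
	}

With the check centralized, a page table option change that pushes a
compile-time shift amount across the 32 boundary changes which instruction is
emitted rather than silently truncating the shift amount.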
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/mm/tlbex.c	| 30 ++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)
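The effect is easiest to see at the pgd-offset computation in build_get_pmde64()
(third hunk below), where an open-coded range check on PGDIR_SHIFT collapses
into a single call:

	/* Before: every call site picked between the two shift encodings. */
	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	/* After: the helper handles either range. */
	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

The refill and load handler hunks make the same trade: shifts such as 62 and
64 + 12 + 1 - segbits were previously hand-split into dsrl32/dsll32 forms with
explicit "- 32" adjustments, which stay correct only while those amounts remain
in the 32..63 range.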
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index d1f68aadbc4c..61374b2c930d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -408,7 +408,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 	} else {
 #ifdef CONFIG_64BIT_PHYS_ADDR
-		uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 #else
 		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 #endif
@@ -549,14 +549,14 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * SMTC uses TCBind value as "CPU" index
 	 */
 	uasm_i_mfc0(p, ptr, C0_TCBIND);
-	uasm_i_dsrl(p, ptr, ptr, 19);
+	uasm_i_dsrl_safe(p, ptr, ptr, 19);
 # else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-	uasm_i_dsrl(p, ptr, ptr, 23);
+	uasm_i_dsrl_safe(p, ptr, ptr, 23);
 # endif
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_daddu(p, ptr, ptr, tmp);
@@ -569,17 +569,15 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_l_vmalloc_done(l, *p);
 
-	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
-		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-	else
-		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+	/* get pgd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 #ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
@@ -720,9 +718,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 		} else {
-			uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
 		}
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
@@ -793,9 +791,9 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
 		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		uasm_i_dsrl32(&p, K1, K0, 62 - 32);
-		uasm_i_dsrl(&p, K0, K0, 12 + 1);
-		uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
 		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */
@@ -1322,9 +1320,9 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
 		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
 		uasm_i_xor(&p, K0, K0, K1);
-		uasm_i_dsrl32(&p, K1, K0, 62 - 32);
-		uasm_i_dsrl(&p, K0, K0, 12 + 1);
-		uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
 		uasm_i_or(&p, K0, K0, K1);
 		uasm_il_bnez(&p, &r, K0, label_leave);
 		/* No need for uasm_i_nop */