Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--	arch/mips/mm/tlbex.c | 188
1 file changed, 151 insertions(+), 37 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index badcf5e8d695..0de0e4127d66 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,8 +29,7 @@
 
 #include <asm/mmu_context.h>
 #include <asm/war.h>
-
-#include "uasm.h"
+#include <asm/uasm.h>
 
 static inline int r45k_bvahwbug(void)
 {
@@ -77,6 +76,8 @@ enum label_id {
 	label_vmalloc_done,
 	label_tlbw_hazard,
 	label_split,
+	label_tlbl_goaround1,
+	label_tlbl_goaround2,
 	label_nopage_tlbl,
 	label_nopage_tlbs,
 	label_nopage_tlbm,
@@ -93,6 +94,8 @@ UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
 UASM_L_LA(_split)
+UASM_L_LA(_tlbl_goaround1)
+UASM_L_LA(_tlbl_goaround2)
 UASM_L_LA(_nopage_tlbl)
 UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
@@ -397,36 +400,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode)
+static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+						  unsigned int reg)
 {
-	/* Set huge page tlb entry size */
-	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
-	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
-	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+#ifdef CONFIG_64BIT_PHYS_ADDR
+		uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#else
+		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
+#endif
+	}
+}
 
-	build_tlb_write_entry(p, l, r, wmode);
+#ifdef CONFIG_HUGETLB_PAGE
 
+static __cpuinit void build_restore_pagemask(u32 **p,
+					     struct uasm_reloc **r,
+					     unsigned int tmp,
+					     enum label_id lid)
+{
 	/* Reset default page size */
 	if (PM_DEFAULT_MASK >> 16) {
 		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else if (PM_DEFAULT_MASK) {
 		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 	} else {
-		uasm_il_b(p, r, label_leave);
+		uasm_il_b(p, r, lid);
 		uasm_i_mtc0(p, 0, C0_PAGEMASK);
 	}
 }
 
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	build_restore_pagemask(p, r, tmp, label_leave);
+}
+
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
@@ -460,15 +487,15 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
-	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
-	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, pte);
+	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 	/* convert to entrylo1 */
 	if (small_sequence)
 		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 	else
 		UASM_i_ADDU(p, pte, pte, tmp);
 
-	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
 static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -549,11 +576,13 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
 }
 
 /*
@@ -684,35 +713,53 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		if (kernel_uses_smartmips_rixi) {
+			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		} else {
+			uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+			uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		}
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 
 		/* The pte entries are pre-shifted */
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	}
 #else
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
-	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
-	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
-	if (r45k_bvahwbug())
-		uasm_i_mfc0(p, tmp, C0_INDEX);
+	if (kernel_uses_smartmips_rixi) {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+	} else {
+		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		if (r4k_250MHZhwbug())
+			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		if (r45k_bvahwbug())
+			uasm_i_mfc0(p, tmp, C0_INDEX);
+	}
 	if (r4k_250MHZhwbug())
-		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
@@ -985,9 +1032,14 @@ static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
-	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-	uasm_il_bnez(p, r, pte, lid);
+	if (kernel_uses_smartmips_rixi) {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
+		uasm_il_beqz(p, r, pte, lid);
+	} else {
+		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, pte, lid);
+	}
 	iPTE_LW(p, pte, ptr);
 }
 
@@ -1272,6 +1324,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it. Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround1(&l, p);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+	}
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
@@ -1284,6 +1364,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	iPTE_LW(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
+
+	if (kernel_uses_smartmips_rixi) {
+		/*
+		 * If the page is not _PAGE_VALID, RI or XI could not
+		 * have triggered it. Skip the expensive test..
+		 */
+		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		uasm_i_nop(&p);
+
+		uasm_i_tlbr(&p);
+		/* Examine entrylo 0 or 1 based on ptr. */
+		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
+		uasm_i_beqz(&p, K0, 8);
+
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
+		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/*
+		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * XI must have triggered it.
+		 */
+		uasm_i_andi(&p, K0, K0, 2);
+		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+		/* Reload the PTE value */
+		iPTE_LW(&p, K0, K1);
+
+		/*
+		 * We clobbered C0_PAGEMASK, restore it. On the other branch
+		 * it is restored in build_huge_tlb_write_entry.
+		 */
+		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);
+
+		uasm_l_tlbl_goaround2(&l, p);
+	}
 	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
 	build_huge_handler_tail(&p, &r, &l, K0, K1);
 #endif
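
For readers of the hunks above: in the kernel_uses_smartmips_rixi case, the new build_convert_pte_to_entrylo() emits one SRL followed by one ROTR, so that _PAGE_GLOBAL lands in EntryLo bit 0 (G) while the software no-exec/no-read bits wrap around into the top of the register, where the hardware XI/RI bits sit. The standalone C sketch below mimics that transformation on a plain 32-bit value; the EX_PAGE_* bit positions and helper names are assumptions chosen for the illustration, not the kernel's actual asm/pgtable-bits.h layout.

/*
 * Illustrative sketch only: mirrors the SRL + ROTR sequence generated by
 * build_convert_pte_to_entrylo() in the RIXI case.  The EX_PAGE_* positions
 * are assumptions for this example, not the real kernel PTE layout.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_NO_EXEC	(1u << 6)	/* assumed position */
#define EX_PAGE_NO_READ	(1u << 7)	/* assumed position */
#define EX_PAGE_GLOBAL	(1u << 8)	/* assumed position */

static uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

static uint32_t pte_to_entrylo(uint32_t pte)
{
	/* SRL by ilog2(EX_PAGE_NO_EXEC): the no-exec bit reaches bit 0. */
	pte >>= 6;
	/*
	 * ROTR by ilog2(EX_PAGE_GLOBAL) - ilog2(EX_PAGE_NO_EXEC): the global
	 * bit lands in EntryLo bit 0 (G); the no-exec/no-read bits wrap to
	 * bits 30/31, where EntryLo keeps XI/RI.
	 */
	return rotr32(pte, 8 - 6);
}

int main(void)
{
	uint32_t pte = EX_PAGE_GLOBAL | EX_PAGE_NO_EXEC;

	/* Expect G (bit 0) and XI (bit 30) set: 0x40000001 */
	printf("entrylo = 0x%08x\n", pte_to_entrylo(pte));
	return 0;
}

Running the sketch for a global, non-executable page prints entrylo = 0x40000001, i.e. the G bit plus the XI bit, which is the same effect the generated handler achieves with a single rotate instead of separate mask-and-or steps.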