author     David Daney <ddaney@caviumnetworks.com>    2010-02-10 18:12:47 -0500
committer  Ralf Baechle <ralf@linux-mips.org>         2010-02-27 06:53:26 -0500
commit     6dd9344cfc41bcc60a01cdc828cb278be7a10e01 (patch)
tree       9c62d563eba8f3acfd1c826a63e6999261b06f5a /arch/mips/mm/tlbex.c
parent     32546f38fab839eee6f62b3f06c2774eade4188a (diff)
MIPS: Implement Read Inhibit/eXecute Inhibit
The SmartMIPS ASE specifies how Read Inhibit (RI) and eXecute Inhibit
(XI) bits in the page tables work. The upper two bits of EntryLo{0,1}
are RI and XI when the feature is enabled in the PageGrain register.
SmartMIPS only covers 32-bit systems. Cavium Octeon+ extends this to
64-bit systems by continuing to place the RI and XI bits in the top of
EntryLo even when EntryLo is 64 bits wide.
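
As background for where those bits land (this is an illustration, not part of
the patch; the positions below are stated from the architecture description
and should be treated as an assumption of the sketch):

    /*
     * Illustrative only: RI/XI occupy the top two bits of EntryLo.
     *
     *   32-bit (SmartMIPS ASE):   bit 31 = RI, bit 30 = XI
     *   64-bit (Cavium Octeon+):  bit 63 = RI, bit 62 = XI
     */
    #ifdef CONFIG_64BIT
    # define ENTRYLO_RI	(1ULL << 63)
    # define ENTRYLO_XI	(1ULL << 62)
    #else
    # define ENTRYLO_RI	(1UL << 31)
    # define ENTRYLO_XI	(1UL << 30)
    #endif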
Because we need to carry the RI and XI bits in the PTE, the layout of
the PTE is changed. There is a two-instruction overhead in the TLB
refill hot path to get the EntryLo bits into the proper position.
The TLB load exception handler also has to probe the TLB to check
whether RI or XI caused the exception.
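
The two instructions referred to here are the SRL/ROTR pair emitted by the
new build_convert_pte_to_entrylo() helper in the diff below. A rough C-level
sketch of what that pair computes (pte_to_entrylo() is an illustrative helper
name, not something this patch adds):

    /*
     * Sketch of the SRL + ROTR conversion emitted by
     * build_convert_pte_to_entrylo() below.  Assumes
     * 0 < ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC) < BITS_PER_LONG.
     */
    static inline unsigned long pte_to_entrylo(unsigned long pte_val)
    {
            unsigned long v = pte_val >> ilog2(_PAGE_NO_EXEC);	/* SRL */
            unsigned int n = ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC);

            /*
             * ROTR: the wrapped-around _PAGE_NO_EXEC/_PAGE_NO_READ bits
             * become XI/RI at the top of EntryLo, and _PAGE_GLOBAL lands
             * at bit 0 where the hardware expects G.
             */
            return (v >> n) | (v << (BITS_PER_LONG - n));
    }

On non-RIXI kernels the helper still emits only the single shift by
ilog2(_PAGE_GLOBAL), so only the RIXI case pays the extra instruction.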
Also of note is that the layout of the PTE bits is determined at compile
and run time rather than statically. In the 32-bit case this allows for
the same number of PFN bits as before the patch, since _PAGE_HUGE is
not supported in 32-bit kernels (we have _PAGE_NO_EXEC and
_PAGE_NO_READ instead of _PAGE_READ and _PAGE_HUGE).
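
A simplified sketch of what such a shift-chained, partly runtime-determined
layout looks like (the names track the patch, but the exact ordering in
pgtable-bits.h may differ; treat this as an assumption-laden illustration):

    /*
     * Simplified sketch -- not the literal contents of pgtable-bits.h.
     * Each position is computed from the previous one, and the
     * RI/XI-related positions depend on the runtime
     * kernel_uses_smartmips_rixi test, which is why the layout is done
     * at compile and run time rather than statically.
     */
    #define _PAGE_PRESENT_SHIFT   0
    #define _PAGE_PRESENT         (1 << _PAGE_PRESENT_SHIFT)
    #define _PAGE_ACCESSED_SHIFT  (_PAGE_PRESENT_SHIFT + 1)
    #define _PAGE_ACCESSED        (1 << _PAGE_ACCESSED_SHIFT)
    #define _PAGE_MODIFIED_SHIFT  (_PAGE_ACCESSED_SHIFT + 1)
    #define _PAGE_MODIFIED        (1 << _PAGE_MODIFIED_SHIFT)

    /* These slots are only consumed when RI/XI is actually in use. */
    #define _PAGE_NO_EXEC_SHIFT   (kernel_uses_smartmips_rixi ? \
                                   _PAGE_MODIFIED_SHIFT + 1 : _PAGE_MODIFIED_SHIFT)
    #define _PAGE_NO_EXEC         (1 << _PAGE_NO_EXEC_SHIFT)
    #define _PAGE_NO_READ_SHIFT   (kernel_uses_smartmips_rixi ? \
                                   _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
    #define _PAGE_NO_READ         (1 << _PAGE_NO_READ_SHIFT)

    /* _PAGE_GLOBAL sits directly above so the SRL + ROTR trick works. */
    #define _PAGE_GLOBAL_SHIFT    (_PAGE_NO_READ_SHIFT + 1)
    #define _PAGE_GLOBAL          (1 << _PAGE_GLOBAL_SHIFT)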
The patch is tested on Cavium Octeon+, but should also work on 32-bit
systems with the SmartMIPS ASE.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/952/
Patchwork: http://patchwork.linux-mips.org/patch/956/
Patchwork: http://patchwork.linux-mips.org/patch/962/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r-- | arch/mips/mm/tlbex.c | 169
1 file changed, 141 insertions, 28 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4a2907c59569..0de0e4127d66 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -76,6 +76,8 @@ enum label_id {
76 | label_vmalloc_done, | 76 | label_vmalloc_done, |
77 | label_tlbw_hazard, | 77 | label_tlbw_hazard, |
78 | label_split, | 78 | label_split, |
79 | label_tlbl_goaround1, | ||
80 | label_tlbl_goaround2, | ||
79 | label_nopage_tlbl, | 81 | label_nopage_tlbl, |
80 | label_nopage_tlbs, | 82 | label_nopage_tlbs, |
81 | label_nopage_tlbm, | 83 | label_nopage_tlbm, |
@@ -92,6 +94,8 @@ UASM_L_LA(_vmalloc)
92 | UASM_L_LA(_vmalloc_done) | 94 | UASM_L_LA(_vmalloc_done) |
93 | UASM_L_LA(_tlbw_hazard) | 95 | UASM_L_LA(_tlbw_hazard) |
94 | UASM_L_LA(_split) | 96 | UASM_L_LA(_split) |
97 | UASM_L_LA(_tlbl_goaround1) | ||
98 | UASM_L_LA(_tlbl_goaround2) | ||
95 | UASM_L_LA(_nopage_tlbl) | 99 | UASM_L_LA(_nopage_tlbl) |
96 | UASM_L_LA(_nopage_tlbs) | 100 | UASM_L_LA(_nopage_tlbs) |
97 | UASM_L_LA(_nopage_tlbm) | 101 | UASM_L_LA(_nopage_tlbm) |
@@ -396,36 +400,60 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
396 | } | 400 | } |
397 | } | 401 | } |
398 | 402 | ||
399 | #ifdef CONFIG_HUGETLB_PAGE | 403 | static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, |
400 | static __cpuinit void build_huge_tlb_write_entry(u32 **p, | 404 | unsigned int reg) |
401 | struct uasm_label **l, | ||
402 | struct uasm_reloc **r, | ||
403 | unsigned int tmp, | ||
404 | enum tlb_write_entry wmode) | ||
405 | { | 405 | { |
406 | /* Set huge page tlb entry size */ | 406 | if (kernel_uses_smartmips_rixi) { |
407 | uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); | 407 | UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC)); |
408 | uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); | 408 | UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); |
409 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | 409 | } else { |
410 | #ifdef CONFIG_64BIT_PHYS_ADDR | ||
411 | uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL)); | ||
412 | #else | ||
413 | UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); | ||
414 | #endif | ||
415 | } | ||
416 | } | ||
410 | 417 | ||
411 | build_tlb_write_entry(p, l, r, wmode); | 418 | #ifdef CONFIG_HUGETLB_PAGE |
412 | 419 | ||
420 | static __cpuinit void build_restore_pagemask(u32 **p, | ||
421 | struct uasm_reloc **r, | ||
422 | unsigned int tmp, | ||
423 | enum label_id lid) | ||
424 | { | ||
413 | /* Reset default page size */ | 425 | /* Reset default page size */ |
414 | if (PM_DEFAULT_MASK >> 16) { | 426 | if (PM_DEFAULT_MASK >> 16) { |
415 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); | 427 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); |
416 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); | 428 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); |
417 | uasm_il_b(p, r, label_leave); | 429 | uasm_il_b(p, r, lid); |
418 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | 430 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
419 | } else if (PM_DEFAULT_MASK) { | 431 | } else if (PM_DEFAULT_MASK) { |
420 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); | 432 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); |
421 | uasm_il_b(p, r, label_leave); | 433 | uasm_il_b(p, r, lid); |
422 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | 434 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
423 | } else { | 435 | } else { |
424 | uasm_il_b(p, r, label_leave); | 436 | uasm_il_b(p, r, lid); |
425 | uasm_i_mtc0(p, 0, C0_PAGEMASK); | 437 | uasm_i_mtc0(p, 0, C0_PAGEMASK); |
426 | } | 438 | } |
427 | } | 439 | } |
428 | 440 | ||
441 | static __cpuinit void build_huge_tlb_write_entry(u32 **p, | ||
442 | struct uasm_label **l, | ||
443 | struct uasm_reloc **r, | ||
444 | unsigned int tmp, | ||
445 | enum tlb_write_entry wmode) | ||
446 | { | ||
447 | /* Set huge page tlb entry size */ | ||
448 | uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); | ||
449 | uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); | ||
450 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | ||
451 | |||
452 | build_tlb_write_entry(p, l, r, wmode); | ||
453 | |||
454 | build_restore_pagemask(p, r, tmp, label_leave); | ||
455 | } | ||
456 | |||
429 | /* | 457 | /* |
430 | * Check if Huge PTE is present, if so then jump to LABEL. | 458 | * Check if Huge PTE is present, if so then jump to LABEL. |
431 | */ | 459 | */ |
@@ -459,7 +487,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
459 | if (!small_sequence) | 487 | if (!small_sequence) |
460 | uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); | 488 | uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); |
461 | 489 | ||
462 | UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */ | 490 | build_convert_pte_to_entrylo(p, pte); |
463 | UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ | 491 | UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ |
464 | /* convert to entrylo1 */ | 492 | /* convert to entrylo1 */ |
465 | if (small_sequence) | 493 | if (small_sequence) |
@@ -685,9 +713,17 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
685 | if (cpu_has_64bits) { | 713 | if (cpu_has_64bits) { |
686 | uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ | 714 | uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ |
687 | uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | 715 | uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ |
688 | uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */ | 716 | if (kernel_uses_smartmips_rixi) { |
689 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | 717 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); |
690 | uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */ | 718 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); |
719 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | ||
720 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | ||
721 | UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | ||
722 | } else { | ||
723 | uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ | ||
724 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | ||
725 | uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ | ||
726 | } | ||
691 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ | 727 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
692 | } else { | 728 | } else { |
693 | int pte_off_even = sizeof(pte_t) / 2; | 729 | int pte_off_even = sizeof(pte_t) / 2; |
@@ -704,13 +740,23 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
704 | UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | 740 | UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ |
705 | if (r45k_bvahwbug()) | 741 | if (r45k_bvahwbug()) |
706 | build_tlb_probe_entry(p); | 742 | build_tlb_probe_entry(p); |
707 | UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */ | 743 | if (kernel_uses_smartmips_rixi) { |
708 | if (r4k_250MHZhwbug()) | 744 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); |
709 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); | 745 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); |
710 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | 746 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); |
711 | UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */ | 747 | if (r4k_250MHZhwbug()) |
712 | if (r45k_bvahwbug()) | 748 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); |
713 | uasm_i_mfc0(p, tmp, C0_INDEX); | 749 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ |
750 | UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | ||
751 | } else { | ||
752 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ | ||
753 | if (r4k_250MHZhwbug()) | ||
754 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); | ||
755 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | ||
756 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ | ||
757 | if (r45k_bvahwbug()) | ||
758 | uasm_i_mfc0(p, tmp, C0_INDEX); | ||
759 | } | ||
714 | if (r4k_250MHZhwbug()) | 760 | if (r4k_250MHZhwbug()) |
715 | UASM_i_MTC0(p, 0, C0_ENTRYLO1); | 761 | UASM_i_MTC0(p, 0, C0_ENTRYLO1); |
716 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ | 762 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
@@ -986,9 +1032,14 @@ static void __cpuinit
986 | build_pte_present(u32 **p, struct uasm_reloc **r, | 1032 | build_pte_present(u32 **p, struct uasm_reloc **r, |
987 | unsigned int pte, unsigned int ptr, enum label_id lid) | 1033 | unsigned int pte, unsigned int ptr, enum label_id lid) |
988 | { | 1034 | { |
989 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | 1035 | if (kernel_uses_smartmips_rixi) { |
990 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | 1036 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT); |
991 | uasm_il_bnez(p, r, pte, lid); | 1037 | uasm_il_beqz(p, r, pte, lid); |
1038 | } else { | ||
1039 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | ||
1040 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | ||
1041 | uasm_il_bnez(p, r, pte, lid); | ||
1042 | } | ||
992 | iPTE_LW(p, pte, ptr); | 1043 | iPTE_LW(p, pte, ptr); |
993 | } | 1044 | } |
994 | 1045 | ||
@@ -1273,6 +1324,34 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
1273 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); | 1324 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
1274 | if (m4kc_tlbp_war()) | 1325 | if (m4kc_tlbp_war()) |
1275 | build_tlb_probe_entry(&p); | 1326 | build_tlb_probe_entry(&p); |
1327 | |||
1328 | if (kernel_uses_smartmips_rixi) { | ||
1329 | /* | ||
1330 | * If the page is not _PAGE_VALID, RI or XI could not | ||
1331 | * have triggered it. Skip the expensive test.. | ||
1332 | */ | ||
1333 | uasm_i_andi(&p, K0, K0, _PAGE_VALID); | ||
1334 | uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1); | ||
1335 | uasm_i_nop(&p); | ||
1336 | |||
1337 | uasm_i_tlbr(&p); | ||
1338 | /* Examine entrylo 0 or 1 based on ptr. */ | ||
1339 | uasm_i_andi(&p, K0, K1, sizeof(pte_t)); | ||
1340 | uasm_i_beqz(&p, K0, 8); | ||
1341 | |||
1342 | UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ | ||
1343 | UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ | ||
1344 | /* | ||
1345 | * If the entryLo (now in K0) is valid (bit 1), RI or | ||
1346 | * XI must have triggered it. | ||
1347 | */ | ||
1348 | uasm_i_andi(&p, K0, K0, 2); | ||
1349 | uasm_il_bnez(&p, &r, K0, label_nopage_tlbl); | ||
1350 | |||
1351 | uasm_l_tlbl_goaround1(&l, p); | ||
1352 | /* Reload the PTE value */ | ||
1353 | iPTE_LW(&p, K0, K1); | ||
1354 | } | ||
1276 | build_make_valid(&p, &r, K0, K1); | 1355 | build_make_valid(&p, &r, K0, K1); |
1277 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | 1356 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); |
1278 | 1357 | ||
@@ -1285,6 +1364,40 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
1285 | iPTE_LW(&p, K0, K1); | 1364 | iPTE_LW(&p, K0, K1); |
1286 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); | 1365 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
1287 | build_tlb_probe_entry(&p); | 1366 | build_tlb_probe_entry(&p); |
1367 | |||
1368 | if (kernel_uses_smartmips_rixi) { | ||
1369 | /* | ||
1370 | * If the page is not _PAGE_VALID, RI or XI could not | ||
1371 | * have triggered it. Skip the expensive test.. | ||
1372 | */ | ||
1373 | uasm_i_andi(&p, K0, K0, _PAGE_VALID); | ||
1374 | uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); | ||
1375 | uasm_i_nop(&p); | ||
1376 | |||
1377 | uasm_i_tlbr(&p); | ||
1378 | /* Examine entrylo 0 or 1 based on ptr. */ | ||
1379 | uasm_i_andi(&p, K0, K1, sizeof(pte_t)); | ||
1380 | uasm_i_beqz(&p, K0, 8); | ||
1381 | |||
1382 | UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ | ||
1383 | UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ | ||
1384 | /* | ||
1385 | * If the entryLo (now in K0) is valid (bit 1), RI or | ||
1386 | * XI must have triggered it. | ||
1387 | */ | ||
1388 | uasm_i_andi(&p, K0, K0, 2); | ||
1389 | uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); | ||
1390 | /* Reload the PTE value */ | ||
1391 | iPTE_LW(&p, K0, K1); | ||
1392 | |||
1393 | /* | ||
1394 | * We clobbered C0_PAGEMASK, restore it. On the other branch | ||
1395 | * it is restored in build_huge_tlb_write_entry. | ||
1396 | */ | ||
1397 | build_restore_pagemask(&p, &r, K0, label_nopage_tlbl); | ||
1398 | |||
1399 | uasm_l_tlbl_goaround2(&l, p); | ||
1400 | } | ||
1288 | uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); | 1401 | uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); |
1289 | build_huge_handler_tail(&p, &r, &l, K0, K1); | 1402 | build_huge_handler_tail(&p, &r, &l, K0, K1); |
1290 | #endif | 1403 | #endif |
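
For readers tracing the generated assembly in the two load-handler hunks
above, the added RI/XI detection can be restated in C roughly as follows.
This is a sketch, not kernel code: tlb_probe_and_read_entrylo() is a
hypothetical stand-in for the tlbr plus mfc0 C0_ENTRYLO{0,1} instructions,
and 'odd' mirrors the "andi K0, K1, sizeof(pte_t)" test that selects
EntryLo0 or EntryLo1.

    /*
     * Sketch of the RI/XI check emitted into the TLB load handler.
     * Returns nonzero when the fault must have come from RI or XI.
     */
    static int fault_was_ri_or_xi(unsigned long pte, int odd)
    {
            unsigned long entrylo;

            if (!(pte & _PAGE_VALID))
                    return 0;	/* not valid: RI/XI cannot have fired */

            /* hypothetical helper standing in for tlbr + mfc0 ENTRYLO0/1 */
            entrylo = tlb_probe_and_read_entrylo(odd);

            /*
             * EntryLo V is bit 1; if the entry is valid yet we still
             * faulted on a read or an instruction fetch, RI or XI must
             * be the cause, so fall through to the nopage_tlbl path.
             */
            return (entrylo & 2) != 0;
    }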