author     David Daney <ddaney@caviumnetworks.com>  2009-05-27 20:47:44 -0400
committer  Ralf Baechle <ralf@linux-mips.org>       2009-06-17 06:06:30 -0400
commit     fd062c847a8cea2821347d7e18165dfa658f7dce (patch)
tree       95410c5460f7d153a9c9b15184ea52e2059427c3 /arch/mips/mm
parent     dd7943920b492d9d8a79080fe05e25ecd7e10bc3 (diff)
MIPS: TLB support for hugetlbfs.
The TLB handlers need to check for huge pages and give them special
handling.  Huge pages consist of two contiguous sub-pages of physical
memory.

* Loading entrylo0 and entrylo1 needs to be handled specially.

* The page mask must be set for huge pages and then restored after
  writing the TLB entries.

* The PTE for huge pages resides in the PMD; we halt traversal of the
  tables there.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
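
The entrylo0/entrylo1 split can be checked with a short C sketch.
Assumptions: HPAGE_SIZE is 2 MB (it is configuration-dependent) and
pte_val(pte) >> 6 yields the EntryLo format with the PFN field starting
at bit 6; the helper below is illustrative, not kernel API.

    /* Minimal sketch: deriving the two EntryLo values from the single
     * huge-page PTE stored in the PMD.  Assumes HPAGE_SIZE = 2 MB and
     * that shifting a PTE right by 6 produces EntryLo format. */
    #include <assert.h>

    #define HPAGE_SIZE (2UL << 20)          /* assumed huge page size */

    static void split_huge_pte(unsigned long pteval,
                               unsigned long *lo0, unsigned long *lo1)
    {
            *lo0 = pteval >> 6;              /* first half of the huge page */
            *lo1 = *lo0 + (HPAGE_SIZE >> 7); /* second half: +HPAGE_SIZE/2 phys */
    }

    int main(void)
    {
            unsigned long lo0, lo1;
            split_huge_pte(0x1000UL << 6, &lo0, &lo1);
            /* The halves differ by (HPAGE_SIZE/2) >> 12 page frames,
             * i.e. HPAGE_SIZE >> 7 in EntryLo units. */
            assert(lo1 - lo0 == ((HPAGE_SIZE / 2) >> 12 << 6));
            return 0;
    }
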
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/tlb-r4k.c	43
-rw-r--r--	arch/mips/mm/tlbex.c	165
2 files changed, 196 insertions(+), 12 deletions(-)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 892be426787c..f60fe513eb60 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -295,21 +296,41 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
         pudp = pud_offset(pgdp, address);
         pmdp = pmd_offset(pudp, address);
         idx = read_c0_index();
-        ptep = pte_offset_map(pmdp, address);
+#ifdef CONFIG_HUGETLB_PAGE
+        /* this could be a huge page  */
+        if (pmd_huge(*pmdp)) {
+                unsigned long lo;
+                write_c0_pagemask(PM_HUGE_MASK);
+                ptep = (pte_t *)pmdp;
+                lo = pte_val(*ptep) >> 6;
+                write_c0_entrylo0(lo);
+                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
+
+                mtc0_tlbw_hazard();
+                if (idx < 0)
+                        tlb_write_random();
+                else
+                        tlb_write_indexed();
+                write_c0_pagemask(PM_DEFAULT_MASK);
+        } else
+#endif
+        {
+                ptep = pte_offset_map(pmdp, address);
 
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-        write_c0_entrylo0(ptep->pte_high);
-        ptep++;
-        write_c0_entrylo1(ptep->pte_high);
+                write_c0_entrylo0(ptep->pte_high);
+                ptep++;
+                write_c0_entrylo1(ptep->pte_high);
 #else
-        write_c0_entrylo0(pte_val(*ptep++) >> 6);
-        write_c0_entrylo1(pte_val(*ptep) >> 6);
+                write_c0_entrylo0(pte_val(*ptep++) >> 6);
+                write_c0_entrylo1(pte_val(*ptep) >> 6);
 #endif
-        mtc0_tlbw_hazard();
-        if (idx < 0)
-                tlb_write_random();
-        else
-                tlb_write_indexed();
+                mtc0_tlbw_hazard();
+                if (idx < 0)
+                        tlb_write_random();
+                else
+                        tlb_write_indexed();
+        }
         tlbw_use_hazard();
         FLUSH_ITLB_VM(vma);
         EXIT_CRITICAL(flags);
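
A number-level check of the PM_HUGE_MASK / PM_DEFAULT_MASK dance above:
the PageMask mask field sits in bits 28:13 and holds
(page_size / 4 KB - 1) << 13, and each EntryLo slot maps half of the
huge page. A minimal sketch, assuming 4 KB base pages and 2 MB huge
pages (so 1 MB halves); the values are assumptions, not the kernel's
definitions.

    /* Worked check of the PageMask values under the assumptions above. */
    #include <assert.h>

    static unsigned long pagemask(unsigned long half_size)
    {
            return ((half_size >> 12) - 1) << 13;  /* mask field, bits 28:13 */
    }

    int main(void)
    {
            assert(pagemask(4096) == 0);               /* 4 KB: default mask */
            assert(pagemask(1UL << 20) == 0x001fe000); /* 1 MB halves */
            return 0;
    }
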
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 62fbd0d89aeb..8f606ead826e 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -8,6 +8,7 @@
  * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
  * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
  * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
@@ -83,6 +84,9 @@ enum label_id {
         label_nopage_tlbm,
         label_smp_pgtable_change,
         label_r3000_write_probe_fail,
+#ifdef CONFIG_HUGETLB_PAGE
+        label_tlb_huge_update,
+#endif
 };
 
 UASM_L_LA(_second_part)
@@ -99,6 +103,9 @@ UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+#ifdef CONFIG_HUGETLB_PAGE
+UASM_L_LA(_tlb_huge_update)
+#endif
 
 /*
  * For debug purposes.
@@ -126,6 +133,7 @@ static inline void dump_handler(const u32 *handler, int count)
 #define C0_TCBIND      2, 2
 #define C0_ENTRYLO1    3, 0
 #define C0_CONTEXT     4, 0
+#define C0_PAGEMASK    5, 0
 #define C0_BADVADDR    8, 0
 #define C0_ENTRYHI     10, 0
 #define C0_EPC         14, 0
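
These C0_* defines carry a register/select pair; PageMask is
coprocessor 0 register 5, select 0, so uasm_i_mtc0(p, tmp, C0_PAGEMASK)
ends up as uasm_i_mtc0(p, tmp, 5, 0). The pasting works because
uasm_i_mtc0 is a function, so the preprocessor flattens the pair into
its argument list before compilation. A stand-alone demo of the idiom
(not the kernel code):

    #include <stdio.h>

    #define C0_PAGEMASK 5, 0    /* CP0 register 5, select 0 */

    static void mtc0(const char *src, int reg, int sel)
    {
            printf("mtc0 %s, $%d, %d\n", src, reg, sel);
    }

    int main(void)
    {
            mtc0("k0", C0_PAGEMASK);   /* expands to mtc0("k0", 5, 0) */
            return 0;
    }
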
@@ -383,6 +391,98 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
         }
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+                                                 struct uasm_label **l,
+                                                 struct uasm_reloc **r,
+                                                 unsigned int tmp,
+                                                 enum tlb_write_entry wmode)
+{
+        /* Set huge page tlb entry size */
+        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+        build_tlb_write_entry(p, l, r, wmode);
+
+        /* Reset default page size */
+        if (PM_DEFAULT_MASK >> 16) {
+                uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+                uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+                uasm_il_b(p, r, label_leave);
+                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+        } else if (PM_DEFAULT_MASK) {
+                uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+                uasm_il_b(p, r, label_leave);
+                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+        } else {
+                uasm_il_b(p, r, label_leave);
+                uasm_i_mtc0(p, 0, C0_PAGEMASK);
+        }
+}
+
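
Two details in build_huge_tlb_write_entry are worth noting. MIPS has no
32-bit load-immediate, so the mask constant is synthesized with lui/ori
when its upper half is set, a single ori when only the lower half is
set, and register $0 when it is zero. And each uasm_i_mtc0 that follows
a uasm_il_b sits in the branch delay slot, so the restore executes on
the way out to label_leave. A sketch of the synthesis, with an assumed
mask value:

    /* Sketch of the lui/ori constant synthesis the three arms pick
     * between; the mask value is an assumption for illustration. */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t synth(uint32_t mask)
    {
            if (mask >> 16) {
                    uint32_t tmp = (mask >> 16) << 16; /* lui tmp, hi16 */
                    return tmp | (mask & 0xffff);      /* ori tmp, tmp, lo16 */
            }
            return mask & 0xffff;                      /* ori tmp, $0, lo16 */
    }

    int main(void)
    {
            assert(synth(0x001fe000) == 0x001fe000);   /* assumed huge mask */
            assert(synth(0x00000000) == 0);            /* 4 KB default */
            return 0;
    }
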
+/*
+ * Check if Huge PTE is present; if so, jump to LABEL.
+ */
+static void __cpuinit
+build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
+                  unsigned int pmd, int lid)
+{
+        UASM_i_LW(p, tmp, 0, pmd);
+        uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+        uasm_il_bnez(p, r, tmp, lid);
+}
+
+static __cpuinit void build_huge_update_entries(u32 **p,
+                                                unsigned int pte,
+                                                unsigned int tmp)
+{
+        int small_sequence;
+
+        /*
+         * A huge PTE describes an area the size of the
+         * configured huge page size. This is twice the
+         * size of the large TLB entry we intend to use.
+         * A TLB entry half the size of the configured
+         * huge page size is configured into entrylo0
+         * and entrylo1 to cover the contiguous huge PTE
+         * address space.
+         */
+        small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
+
+        /* We can clobber tmp.  It isn't used after this. */
+        if (!small_sequence)
+                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
+
+        UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+        /* convert to entrylo1 */
+        if (small_sequence)
+                UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
+        else
+                UASM_i_ADDU(p, pte, pte, tmp);
+
+        uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+}
+
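
small_sequence asks whether HPAGE_SIZE >> 7, the EntryLo distance
between the two halves, fits addiu's 16-bit immediate; larger
increments are built with lui and applied with addu. A worked check
under assumed huge page sizes:

    /* Worked check of the small_sequence cutoff; sizes are assumptions. */
    #include <assert.h>

    int main(void)
    {
            assert(((2UL << 20) >> 7) == 0x4000);   /* 2 MB: addiu suffices */
            assert(((64UL << 20) >> 7) == 0x80000); /* 64 MB: lui/addu path */
            return 0;
    }
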
+static __cpuinit void build_huge_handler_tail(u32 **p,
+                                              struct uasm_reloc **r,
+                                              struct uasm_label **l,
+                                              unsigned int pte,
+                                              unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+        UASM_i_SC(p, pte, 0, ptr);
+        uasm_il_beqz(p, r, pte, label_tlb_huge_update);
+        UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
+#else
+        UASM_i_SW(p, pte, 0, ptr);
+#endif
+        build_huge_update_entries(p, pte, ptr);
+        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_64BIT
 /*
  * TMP and PTR are scratch.
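
On SMP the store-back in build_huge_handler_tail pairs with the earlier
ll load of the PTE: if the sc fails, the beqz loops back to
label_tlb_huge_update, and the LW in its delay slot reloads the PTE
that sc clobbered. A C11-atomics analogue of that retry shape
(illustrative; the real handler emits ll/sc through uasm):

    #include <stdatomic.h>

    static void pte_set_bits(_Atomic unsigned long *ptep, unsigned long bits)
    {
            unsigned long old = atomic_load(ptep);
            /* A failed exchange refreshes 'old', mirroring the reload in
             * the delay slot, and we retry like the beqz loop. */
            while (!atomic_compare_exchange_weak(ptep, &old, old | bits))
                    ;
    }

    int main(void)
    {
            _Atomic unsigned long pte = 0x1000;
            pte_set_bits(&pte, 0x2);
            return pte == 0x1002 ? 0 : 1;
    }
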
@@ -689,12 +789,23 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
         build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+        build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+#endif
+
         build_get_ptep(&p, K0, K1);
         build_update_entries(&p, K0, K1);
         build_tlb_write_entry(&p, &l, &r, tlb_random);
         uasm_l_leave(&l, p);
         uasm_i_eret(&p); /* return from trap */
 
+#ifdef CONFIG_HUGETLB_PAGE
+        uasm_l_tlb_huge_update(&l, p);
+        UASM_i_LW(&p, K0, 0, K1);
+        build_huge_update_entries(&p, K0, K1);
+        build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
+#endif
+
 #ifdef CONFIG_64BIT
         build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
 #endif
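
Laid out as C control flow, the rebuilt refill handler tests the huge
bit between the PGD walk and the PTE walk, keeps the huge path out of
line after the fast path's eret, and rejoins at label_leave. A runnable
model; the helper layout and the _PAGE_HUGE value are assumptions, not
kernel API:

    #include <stdio.h>

    #define _PAGE_HUGE 0x10000UL    /* assumed bit position */

    static void refill(unsigned long pmd_entry)
    {
            if (pmd_entry & _PAGE_HUGE) {   /* build_is_huge_pte */
                    /* PMD entry *is* the PTE: write the EntryLo pair
                     * under the huge page mask, rejoin at label_leave. */
                    printf("huge path\n");
                    return;
            }
            /* build_get_ptep -> build_update_entries -> tlb write, eret */
            printf("normal path\n");
    }

    int main(void)
    {
            refill(0x12340000UL | _PAGE_HUGE);
            refill(0x12340000UL & ~_PAGE_HUGE);
            return 0;
    }
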
@@ -733,7 +844,9 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
                 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                 final_len = p - tlb_handler;
         } else {
-#ifdef MODULE_START
+#if defined(CONFIG_HUGETLB_PAGE)
+                const enum label_id ls = label_tlb_huge_update;
+#elif defined(MODULE_START)
                 const enum label_id ls = label_module_alloc;
 #else
                 const enum label_id ls = label_vmalloc;
@@ -1130,6 +1243,15 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
         build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+        /*
+         * For huge tlb entries, pmd doesn't contain an address but
+         * instead contains the tlb pte. Check the PAGE_HUGE bit and
+         * see if we need to jump to huge tlb processing.
+         */
+        build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+#endif
+
         UASM_i_MFC0(p, pte, C0_BADVADDR);
         UASM_i_LW(p, ptr, 0, ptr);
         UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
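
The comment states the key invariant: for a huge mapping the PMD slot
holds the PTE itself, flagged with _PAGE_HUGE, rather than a pointer to
a PTE page. The C-side test reduces to a bit check, sketched below (the
real pmd_huge() lives in the hugetlb support code; the bit value is an
assumption):

    #define _PAGE_HUGE 0x10000UL    /* assumed bit position */

    static inline int pmd_entry_is_huge(unsigned long pmd_entry)
    {
            return (pmd_entry & _PAGE_HUGE) != 0;
    }

    int main(void)
    {
            return pmd_entry_is_huge(0x12345UL | _PAGE_HUGE) ? 0 : 1;
    }
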
@@ -1187,6 +1309,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
         build_make_valid(&p, &r, K0, K1);
         build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+        /*
+         * This is the entry point when build_r4000_tlbchange_handler_head
+         * spots a huge page.
+         */
+        uasm_l_tlb_huge_update(&l, p);
+        iPTE_LW(&p, K0, K1);
+        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+        build_tlb_probe_entry(&p);
+        uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
+        build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
         uasm_l_nopage_tlbl(&l, p);
         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
         uasm_i_nop(&p);
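
These R4000-class cores keep referenced/valid bookkeeping in software:
the huge load path ors _PAGE_ACCESSED | _PAGE_VALID into the PTE before
build_huge_handler_tail stores it back and loads the TLB, and the store
and modify paths below do the same with _PAGE_MODIFIED | _PAGE_DIRTY
added. A minimal sketch with assumed bit values:

    #define _PAGE_VALID     0x0002UL    /* assumed values */
    #define _PAGE_ACCESSED  0x0008UL

    static unsigned long pte_mark_referenced(unsigned long pte)
    {
            /* uasm_i_ori(&p, K0, K0, _PAGE_ACCESSED | _PAGE_VALID) */
            return pte | _PAGE_ACCESSED | _PAGE_VALID;
    }

    int main(void)
    {
            return pte_mark_referenced(0x1000) == 0x100a ? 0 : 1;
    }
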
@@ -1218,6 +1353,20 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
         build_make_write(&p, &r, K0, K1);
         build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+        /*
+         * This is the entry point when
+         * build_r4000_tlbchange_handler_head spots a huge page.
+         */
+        uasm_l_tlb_huge_update(&l, p);
+        iPTE_LW(&p, K0, K1);
+        build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+        build_tlb_probe_entry(&p);
+        uasm_i_ori(&p, K0, K0,
+                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+        build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
         uasm_l_nopage_tlbs(&l, p);
         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
         uasm_i_nop(&p);
@@ -1250,6 +1399,20 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
         build_make_write(&p, &r, K0, K1);
         build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+        /*
+         * This is the entry point when
+         * build_r4000_tlbchange_handler_head spots a huge page.
+         */
+        uasm_l_tlb_huge_update(&l, p);
+        iPTE_LW(&p, K0, K1);
+        build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+        build_tlb_probe_entry(&p);
+        uasm_i_ori(&p, K0, K0,
+                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+        build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
         uasm_l_nopage_tlbm(&l, p);
         uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
         uasm_i_nop(&p);