path: root/arch/sparc/mm
author		David Miller <davem@davemloft.net>	2012-10-08 19:34:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:23:06 -0400
commit		9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (patch)
tree		77528ae73fe70d1bae3ced18a50e59fd81d2372c /arch/sparc/mm
parent		f5c8ad47284ca01dafc37da5a72bb9644174d387 (diff)
sparc64: Support transparent huge pages.
This is relatively easy since PMD's now cover exactly 4MB of memory.

Our PMD entries are 32-bits each, so we use a special encoding.  The lowest bit, PMD_ISHUGE, determines the interpretation.  This is possible because sparc64's page tables are purely software entities so we can use whatever encoding scheme we want.  We just have to make the TLB miss assembler page table walkers aware of the layout.

set_pmd_at() works much like set_pte_at(), but it has to handle TLB flushing for two regimes.  In the first we are going to a huge page from a table of non-huge PTEs, so we have to queue up TLB flushes based upon what mappings are valid in the PTE table.  In the second regime we are going from huge-page to non-huge-page, and in that case we need only queue up a single TLB flush to push out the huge page mapping.

We still have 5 bits remaining in the huge PMD encoding so we can very likely support any new pieces of THP state tracking that might get added in the future.

With lots of help from Johannes Weiner.

Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
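[Editor's illustration, not part of the patch: a minimal user-space sketch of the two flush regimes described above. PMD_ISHUGE_BIT, PTES_PER_TABLE, BASE_PAGE_SIZE and queue_flush() are made-up stand-ins for the real sparc64 definitions, and queue_flush() only prints the address it would flush.]

/*
 * Standalone sketch, not kernel code.  Models only the flush decision:
 * a huge old PMD needs one flush, a non-huge old PMD needs one flush
 * per valid PTE in the table it pointed at.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PMD_ISHUGE_BIT	0x1u	/* stand-in: lowest bit marks a huge mapping */
#define PTES_PER_TABLE	512	/* stand-in: 4MB huge page / 8KB base pages  */
#define BASE_PAGE_SIZE	8192ull

/* Stand-in for queuing one TLB flush (cf. tlb_batch_add_one() in the patch). */
static void queue_flush(uint64_t vaddr, bool exec)
{
	printf("flush %#llx%s\n", (unsigned long long)vaddr, exec ? " (exec)" : "");
}

static void flush_old_pmd(uint32_t orig_pmd, uint64_t addr,
			  const uint64_t *pte_table, bool exec)
{
	if (orig_pmd & PMD_ISHUGE_BIT) {
		/* Regime 2: the old mapping was one huge page: single flush. */
		queue_flush(addr, exec);
	} else {
		/* Regime 1: the old PMD pointed at a table of non-huge PTEs:
		 * queue a flush for every valid entry in that table.
		 */
		for (int i = 0; i < PTES_PER_TABLE; i++)
			if (pte_table[i])
				queue_flush(addr + i * BASE_PAGE_SIZE, exec);
	}
}

int main(void)
{
	uint64_t ptes[PTES_PER_TABLE] = { [0] = 1, [3] = 1 };	/* two valid PTEs */

	flush_old_pmd(0x2, 0x400000, ptes, false);		/* non-huge old PMD */
	flush_old_pmd(PMD_ISHUGE_BIT, 0x800000, NULL, true);	/* huge old PMD */
	return 0;
}

The actual decision in the patch is made in set_pmd_at() in arch/sparc/mm/tlb.c below, using tlb_batch_add_one() and tlb_batch_pmd_scan().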
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--	arch/sparc/mm/fault_64.c	4
-rw-r--r--	arch/sparc/mm/hugetlbpage.c	50
-rw-r--r--	arch/sparc/mm/init_64.c	204
-rw-r--r--	arch/sparc/mm/tlb.c	118
-rw-r--r--	arch/sparc/mm/tsb.c	14
5 files changed, 306 insertions, 84 deletions
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 413d2926330..2976dba1eba 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -465,13 +465,13 @@ good_area:
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.huge_pte_count;
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 07e14535375..f76f83d5ac6 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -303,53 +303,3 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	return NULL;
 }
-
-static void context_reload(void *__data)
-{
-	struct mm_struct *mm = __data;
-
-	if (mm == current->mm)
-		load_secondary_context(mm);
-}
-
-void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
-
-	if (likely(tp->tsb != NULL))
-		return;
-
-	tsb_grow(mm, MM_TSB_HUGE, 0);
-	tsb_context_switch(mm);
-	smp_tsb_sync(mm);
-
-	/* On UltraSPARC-III+ and later, configure the second half of
-	 * the Data-TLB for huge pages.
-	 */
-	if (tlb_type == cheetah_plus) {
-		unsigned long ctx;
-
-		spin_lock(&ctx_alloc_lock);
-		ctx = mm->context.sparc64_ctx_val;
-		ctx &= ~CTX_PGSZ_MASK;
-		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
-		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
-
-		if (ctx != mm->context.sparc64_ctx_val) {
-			/* When changing the page size fields, we
-			 * must perform a context flush so that no
-			 * stale entries match.  This flush must
-			 * occur with the original context register
-			 * settings.
-			 */
-			do_flush_tlb_mm(mm);
-
-			/* Reload the context register of all processors
-			 * also executing in this address space.
-			 */
-			mm->context.sparc64_ctx_val = ctx;
-			on_each_cpu(context_reload, mm, 0);
-		}
-		spin_unlock(&ctx_alloc_lock);
-	}
-}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 12ef4ea60c8..9e28a118e6a 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -306,12 +306,24 @@ static void flush_dcache(unsigned long pfn)
 	}
 }
 
+/* mm->context.lock must be held */
+static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
+				    unsigned long tsb_hash_shift, unsigned long address,
+				    unsigned long tte)
+{
+	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
+	unsigned long tag;
+
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, tte);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
+	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
-	struct tsb *tsb;
-	unsigned long tag, flags;
-	unsigned long tsb_index, tsb_hash_shift;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -328,7 +340,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
 		if ((tlb_type == hypervisor &&
 		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
@@ -340,11 +352,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 	}
 #endif
 
-	tsb = mm->context.tsb_block[tsb_index].tsb;
-	tsb += ((address >> tsb_hash_shift) &
-		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
-	tag = (address >> 22UL);
-	tsb_insert(tsb, tag, pte_val(pte));
+	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
+				address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
@@ -2568,3 +2577,180 @@ void pgtable_free(void *table, bool is_page)
 	else
 		kmem_cache_free(pgtable_cache, table);
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
+{
+	if (pgprot_val(pgprot) & _PAGE_VALID)
+		pmd_val(pmd) |= PMD_HUGE_PRESENT;
+	if (tlb_type == hypervisor) {
+		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
+			pmd_val(pmd) |= PMD_HUGE_WRITE;
+		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
+			pmd_val(pmd) |= PMD_HUGE_EXEC;
+
+		if (!for_modify) {
+			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
+				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
+				pmd_val(pmd) |= PMD_HUGE_DIRTY;
+		}
+	} else {
+		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
+			pmd_val(pmd) |= PMD_HUGE_WRITE;
+		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
+			pmd_val(pmd) |= PMD_HUGE_EXEC;
+
+		if (!for_modify) {
+			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
+				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
+				pmd_val(pmd) |= PMD_HUGE_DIRTY;
+		}
+	}
+
+	return pmd;
+}
+
+pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+	pmd_t pmd;
+
+	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
+	pmd_val(pmd) |= PMD_ISHUGE;
+	pmd = pmd_set_protbits(pmd, pgprot, false);
+	return pmd;
+}
+
+pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
+			  PMD_HUGE_WRITE |
+			  PMD_HUGE_EXEC);
+	pmd = pmd_set_protbits(pmd, newprot, true);
+	return pmd;
+}
+
+pgprot_t pmd_pgprot(pmd_t entry)
+{
+	unsigned long pte = 0;
+
+	if (pmd_val(entry) & PMD_HUGE_PRESENT)
+		pte |= _PAGE_VALID;
+
+	if (tlb_type == hypervisor) {
+		if (pmd_val(entry) & PMD_HUGE_PRESENT)
+			pte |= _PAGE_PRESENT_4V;
+		if (pmd_val(entry) & PMD_HUGE_EXEC)
+			pte |= _PAGE_EXEC_4V;
+		if (pmd_val(entry) & PMD_HUGE_WRITE)
+			pte |= _PAGE_W_4V;
+		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+			pte |= _PAGE_ACCESSED_4V;
+		if (pmd_val(entry) & PMD_HUGE_DIRTY)
+			pte |= _PAGE_MODIFIED_4V;
+		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
+	} else {
+		if (pmd_val(entry) & PMD_HUGE_PRESENT)
+			pte |= _PAGE_PRESENT_4U;
+		if (pmd_val(entry) & PMD_HUGE_EXEC)
+			pte |= _PAGE_EXEC_4U;
+		if (pmd_val(entry) & PMD_HUGE_WRITE)
+			pte |= _PAGE_W_4U;
+		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+			pte |= _PAGE_ACCESSED_4U;
+		if (pmd_val(entry) & PMD_HUGE_DIRTY)
+			pte |= _PAGE_MODIFIED_4U;
+		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
+	}
+
+	return __pgprot(pte);
+}
+
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+			  pmd_t *pmd)
+{
+	unsigned long pte, flags;
+	struct mm_struct *mm;
+	pmd_t entry = *pmd;
+	pgprot_t prot;
+
+	if (!pmd_large(entry) || !pmd_young(entry))
+		return;
+
+	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
+	pte <<= PMD_PADDR_SHIFT;
+	pte |= _PAGE_VALID;
+
+	prot = pmd_pgprot(entry);
+
+	if (tlb_type == hypervisor)
+		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
+	else
+		pgprot_val(prot) |= _PAGE_SZHUGE_4U;
+
+	pte |= pgprot_val(prot);
+
+	mm = vma->vm_mm;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					addr, pte);
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void context_reload(void *__data)
+{
+	struct mm_struct *mm = __data;
+
+	if (mm == current->mm)
+		load_secondary_context(mm);
+}
+
+void hugetlb_setup(struct mm_struct *mm)
+{
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
+	/* On UltraSPARC-III+ and later, configure the second half of
+	 * the Data-TLB for huge pages.
+	 */
+	if (tlb_type == cheetah_plus) {
+		unsigned long ctx;
+
+		spin_lock(&ctx_alloc_lock);
+		ctx = mm->context.sparc64_ctx_val;
+		ctx &= ~CTX_PGSZ_MASK;
+		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+		if (ctx != mm->context.sparc64_ctx_val) {
+			/* When changing the page size fields, we
+			 * must perform a context flush so that no
+			 * stale entries match.  This flush must
+			 * occur with the original context register
+			 * settings.
+			 */
+			do_flush_tlb_mm(mm);
+
+			/* Reload the context register of all processors
+			 * also executing in this address space.
+			 */
+			mm->context.sparc64_ctx_val = ctx;
+			on_each_cpu(context_reload, mm, 0);
+		}
+		spin_unlock(&ctx_alloc_lock);
+	}
+}
+#endif
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b1f279cd00b..3e8fec391fe 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -43,16 +43,37 @@ void flush_tlb_pending(void)
 	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+			      bool exec)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
-	if (pte_exec(orig))
+	if (exec)
 		vaddr |= 0x1UL;
 
+	nr = tb->tlb_nr;
+
+	if (unlikely(nr != 0 && mm != tb->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (nr == 0)
+		tb->mm = mm;
+
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
+	if (nr >= TLB_BATCH_NR)
+		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
+}
+
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
+{
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -77,26 +98,91 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 no_cache_flush:
+	if (!fullmm)
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+			       pmd_t pmd, bool exec)
+{
+	unsigned long end;
+	pte_t *pte;
+
+	pte = pte_offset_map(&pmd, vaddr);
+	end = vaddr + HPAGE_SIZE;
+	while (vaddr < end) {
+		if (pte_val(*pte) & _PAGE_VALID)
+			tlb_batch_add_one(mm, vaddr, exec);
+		pte++;
+		vaddr += PAGE_SIZE;
+	}
+	pte_unmap(pte);
+}
 
-	if (fullmm) {
-		put_cpu_var(tlb_batch);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+
+	if (mm == &init_mm)
 		return;
+
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
+		if (pmd_val(pmd) & PMD_ISHUGE)
+			mm->context.huge_pte_count++;
+		else
+			mm->context.huge_pte_count--;
+		if (mm->context.huge_pte_count == 1)
+			hugetlb_setup(mm);
 	}
 
-	nr = tb->tlb_nr;
+	if (!pmd_none(orig)) {
+		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
 
-	if (unlikely(nr != 0 && mm != tb->mm)) {
-		flush_tlb_pending();
-		nr = 0;
+		addr &= HPAGE_MASK;
+		if (pmd_val(orig) & PMD_ISHUGE)
+			tlb_batch_add_one(mm, addr, exec);
+		else
+			tlb_batch_pmd_scan(mm, addr, orig, exec);
 	}
+}
 
-	if (nr == 0)
-		tb->mm = mm;
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
 
-	tb->vaddrs[nr] = vaddr;
-	tb->tlb_nr = ++nr;
-	if (nr >= TLB_BATCH_NR)
-		flush_tlb_pending();
+	assert_spin_locked(&mm->page_table_lock);
 
-	put_cpu_var(tlb_batch);
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
+	mm->pmd_huge_pte = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	pte_val(pgtable[0]) = 0;
+	pte_val(pgtable[1]) = 0;
+
+	return pgtable;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a35ee832baf..7f647434749 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -78,7 +78,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 		base = __pa(base);
 	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
@@ -93,7 +93,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
 #define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
 #endif
@@ -190,7 +190,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	case MM_TSB_BASE:
 		hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 		break;
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	case MM_TSB_HUGE:
 		hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
 		break;
@@ -205,7 +205,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	case MM_TSB_BASE:
 		hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 		break;
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	case MM_TSB_HUGE:
 		hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 		break;
@@ -427,7 +427,7 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	unsigned long huge_pte_count;
 #endif
 	unsigned int i;
@@ -436,7 +436,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 	mm->context.sparc64_ctx_val = 0UL;
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	/* We reset it to zero because the fork() page copying
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (unlikely(huge_pte_count))
 		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
 #endif