 arch/sparc/include/asm/mmu_64.h |  3 ++-
 arch/sparc/mm/fault_64.c        |  6 +++---
 arch/sparc/mm/hugetlbpage.c     |  4 ++--
 arch/sparc/mm/init_64.c         |  3 ++-
 arch/sparc/mm/tlb.c             |  4 ++--
 arch/sparc/mm/tsb.c             | 14 ++++++++------
 6 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 70067ce184b1..f7de0dbc38af 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -92,7 +92,8 @@ struct tsb_config {
 typedef struct {
         spinlock_t              lock;
         unsigned long           sparc64_ctx_val;
-        unsigned long           huge_pte_count;
+        unsigned long           hugetlb_pte_count;
+        unsigned long           thp_pte_count;
         struct tsb_config       tsb_block[MM_NUM_TSBS];
         struct hv_tsb_descr     tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 6c43b924a7a2..3a16ba0dc356 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -476,14 +476,14 @@ good_area:
         up_read(&mm->mmap_sem);
 
         mm_rss = get_mm_rss(mm);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-        mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+        mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
         if (unlikely(mm_rss >
                      mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                 tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-        mm_rss = mm->context.huge_pte_count;
+        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
         if (unlikely(mm_rss >
                      mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                 if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
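(For reference, the sizing arithmetic in the hunk above can be sketched as a standalone helper. The struct and functions below are illustrative only and not part of the patch; they assume, as the hunk implies, that THP mappings are included in the mm's RSS while hugetlb mappings are not, which is why only the THP contribution is subtracted when estimating base-page TSB pressure.)

/* Illustrative sketch only -- not kernel code. */
struct ctx_counts {
        unsigned long rss;                /* get_mm_rss(mm), in PAGE_SIZE units */
        unsigned long hugetlb_pte_count;  /* hugetlbfs huge mappings */
        unsigned long thp_pte_count;      /* transparent huge page mappings */
        unsigned long pages_per_hpage;    /* HPAGE_SIZE / PAGE_SIZE */
};

/* Pages mapped with base-sized PTEs: everything in RSS except THP. */
static unsigned long base_tsb_entries(const struct ctx_counts *c)
{
        return c->rss - c->thp_pte_count * c->pages_per_hpage;
}

/* Huge-TSB entries: one per huge mapping, hugetlb and THP alike. */
static unsigned long huge_tsb_entries(const struct ctx_counts *c)
{
        return c->hugetlb_pte_count + c->thp_pte_count;
}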
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ba52e6466a82..d69b66e01b84 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -180,7 +180,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
         unsigned long nptes;
 
         if (!pte_present(*ptep) && pte_present(entry))
-                mm->context.huge_pte_count++;
+                mm->context.hugetlb_pte_count++;
 
         addr &= HPAGE_MASK;
 
@@ -212,7 +212,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
         entry = *ptep;
         if (pte_present(entry))
-                mm->context.huge_pte_count--;
+                mm->context.hugetlb_pte_count--;
 
         addr &= HPAGE_MASK;
         nptes = 1 << HUGETLB_PAGE_ORDER;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index aec508e37490..aef153f9fdac 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -346,7 +346,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
         spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-        if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+        if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+            is_hugetlb_pte(pte))
                 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
                                         address, pte_val(pte));
         else
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index f81cd9736700..3659d37b4d81 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
         if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                 if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-                        mm->context.huge_pte_count++;
+                        mm->context.thp_pte_count++;
                 else
-                        mm->context.huge_pte_count--;
+                        mm->context.thp_pte_count--;
 
                 /* Do not try to allocate the TSB hash table if we
                  * don't have one already.  We have various locks held
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a0604a493a36..6725ed45580e 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -470,7 +470,7 @@ retry_tsb_alloc:
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-        unsigned long huge_pte_count;
+        unsigned long total_huge_pte_count;
 #endif
         unsigned int i;
 
@@ -479,12 +479,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         mm->context.sparc64_ctx_val = 0UL;
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-        /* We reset it to zero because the fork() page copying
+        /* We reset them to zero because the fork() page copying
          * will re-increment the counters as the parent PTEs are
          * copied into the child address space.
          */
-        huge_pte_count = mm->context.huge_pte_count;
-        mm->context.huge_pte_count = 0;
+        total_huge_pte_count = mm->context.hugetlb_pte_count +
+                        mm->context.thp_pte_count;
+        mm->context.hugetlb_pte_count = 0;
+        mm->context.thp_pte_count = 0;
 #endif
 
         /* copy_mm() copies over the parent's mm_struct before calling
@@ -500,8 +502,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-        if (unlikely(huge_pte_count))
-                tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+        if (unlikely(total_huge_pte_count))
+                tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
 #endif
 
         if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
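
(Taken together, the counter handling in this patch can be summarized in a short sketch. The helper below is hypothetical and simply mirrors what the hunks do: hugetlb_pte_count is maintained by set_huge_pte_at()/huge_ptep_get_and_clear(), thp_pte_count by set_pmd_at(), and init_new_context() zeroes both after saving their sum to pre-size the huge TSB for the new mm.)

/* Hypothetical sketch of the per-mm counter model after the split. */
struct mm_huge_counts {
        unsigned long hugetlb_pte_count;  /* hugetlbfs mappings */
        unsigned long thp_pte_count;      /* transparent huge pages */
};

static unsigned long fork_reset_and_total(struct mm_huge_counts *c)
{
        unsigned long total = c->hugetlb_pte_count + c->thp_pte_count;

        /* Zeroed here because copying the parent's PTEs into the child
         * re-increments each counter as the mappings are recreated.
         */
        c->hugetlb_pte_count = 0;
        c->thp_pte_count = 0;
        return total;   /* used to pre-size the child's huge TSB */
}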