aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/sparc/include/asm/page_64.h1
-rw-r--r--arch/sparc/mm/fault_64.c1
-rw-r--r--arch/sparc/mm/tlb.c35
-rw-r--r--arch/sparc/mm/tsb.c18
4 files changed, 45 insertions, 10 deletions
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 8c2a8c937540..c1263fc390db 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index e16fdd28a931..3f291d8c57f7 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -484,6 +484,7 @@ good_area:
 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+	mm_rss *= REAL_HPAGE_PER_HPAGE;
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
 		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3659d37b4d81..c56a195c9071 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		return;
 
 	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
-		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-			mm->context.thp_pte_count++;
-		else
-			mm->context.thp_pte_count--;
+		/*
+		 * Note that this routine only sets pmds for THP pages.
+		 * Hugetlb pages are handled elsewhere.  We need to check
+		 * for huge zero page.  Huge zero pages are like hugetlb
+		 * pages in that there is no RSS, but there is the need
+		 * for TSB entries.  So, huge zero page counts go into
+		 * hugetlb_pte_count.
+		 */
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+			if (is_huge_zero_page(pmd_page(pmd)))
+				mm->context.hugetlb_pte_count++;
+			else
+				mm->context.thp_pte_count++;
+		} else {
+			if (is_huge_zero_page(pmd_page(orig)))
+				mm->context.hugetlb_pte_count--;
+			else
+				mm->context.thp_pte_count--;
+		}
 
 		/* Do not try to allocate the TSB hash table if we
 		 * don't have one already.  We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
+/*
+ * This routine is only called when splitting a THP
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 
 	set_pmd_at(vma->vm_mm, address, pmdp, entry);
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	/*
+	 * set_pmd_at() will not be called in a way to decrement
+	 * thp_pte_count when splitting a THP, so do it now.
+	 * Sanity check pmd before doing the actual decrement.
+	 */
+	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+	    !is_huge_zero_page(pmd_page(entry)))
+		(vma->vm_mm)->context.thp_pte_count--;
 }
 
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 6725ed45580e..f2b77112e9d8 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -469,8 +469,10 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	unsigned long mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	unsigned long total_huge_pte_count;
+	unsigned long saved_hugetlb_pte_count;
+	unsigned long saved_thp_pte_count;
 #endif
 	unsigned int i;
 
@@ -483,10 +485,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
 	 */
-	total_huge_pte_count = mm->context.hugetlb_pte_count +
-			mm->context.thp_pte_count;
+	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+	saved_thp_pte_count = mm->context.thp_pte_count;
 	mm->context.hugetlb_pte_count = 0;
 	mm->context.thp_pte_count = 0;
+
+	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
 #endif
 
 	/* copy_mm() copies over the parent's mm_struct before calling
@@ -499,11 +503,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, mm_rss);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (unlikely(total_huge_pte_count))
-		tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE,
+			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
+			 REAL_HPAGE_PER_HPAGE);
 #endif
 
 	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))