author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-12-12 19:44:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 21:55:08 -0500
commit		953c66c2b22a304dbc3c3d7fc8e8c25cd97a03d8
tree		c851d72f1be182bc4ae4857ff720e34835e68371 /mm/huge_memory.c
parent		1dd38b6c27d59414e89c08dd1ae9677a8e12cbc4
mm: THP page cache support for ppc64
Add an arch-specific callback in the generic THP page cache code that will deposit and withdraw preallocated page tables. Archs like ppc64 use this preallocated table to store the hash pte slot information.

Testing: kernel build of the patch series on tmpfs mounted with option huge=always.

The related thp stats:

thp_fault_alloc 72939
thp_fault_fallback 60547
thp_collapse_alloc 603
thp_collapse_alloc_failed 0
thp_file_alloc 253763
thp_file_mapped 4251
thp_split_page 51518
thp_split_page_failed 1
thp_deferred_split_page 73566
thp_split_pmd 665
thp_zero_page_alloc 3
thp_zero_page_alloc_failed 0

[akpm@linux-foundation.org: remove unneeded parentheses, per Kirill]
Link: http://lkml.kernel.org/r/20161113150025.17942-2-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
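For reference, the arch callback added by this series is a preprocessor hook with a generic no-op fallback, so only archs that opt in pay any cost. A minimal sketch of that pattern in C (the comment text is explanatory, not the exact upstream source):

	/*
	 * Generic fallback: by default no extra page table needs to be
	 * deposited for file THP mappings.  An arch such as ppc64, which
	 * stashes hash pte slot information in the deposited table,
	 * defines this macro to return true before the fallback is seen.
	 */
	#ifndef arch_needs_pgtable_deposit
	#define arch_needs_pgtable_deposit() (false)
	#endif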
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b54044c21076..2b44ac11178f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1380,6 +1380,15 @@ out_unlocked:
 	return ret;
 }
 
+static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
+{
+	pgtable_t pgtable;
+
+	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+	pte_free(mm, pgtable);
+	atomic_long_dec(&mm->nr_ptes);
+}
+
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
@@ -1421,6 +1430,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			atomic_long_dec(&tlb->mm->nr_ptes);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		} else {
+			if (arch_needs_pgtable_deposit())
+				zap_deposited_table(tlb->mm, pmd);
 			add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
 		}
 		spin_unlock(ptl);
@@ -1607,6 +1618,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (!vma_is_anonymous(vma)) {
 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+		/*
+		 * We are going to unmap this huge page. So
+		 * just go ahead and zap it
+		 */
+		if (arch_needs_pgtable_deposit())
+			zap_deposited_table(mm, pmd);
 		if (vma_is_dax(vma))
 			return;
 		page = pmd_page(_pmd);
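The deposit side of this pairing lives outside mm/huge_memory.c (the same patch touches the page cache fault path elsewhere); a hedged sketch of the counterpart to zap_deposited_table() above, assuming a preallocated page table page 'pgtable' is in hand when a file THP is mapped:

	/* Deposit the preallocated table so the arch can find it again at
	 * zap or split time; illustrative placement, not the exact hunk. */
	if (arch_needs_pgtable_deposit()) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		/* the deposited table is accounted like a normal pte page */
		atomic_long_inc(&mm->nr_ptes);
	}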