aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2017-01-10 19:57:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-01-10 21:31:54 -0500
commitd670ffd87509b6b136d8ed6f757851a8ebe442b2 (patch)
tree3c0a12a54869fd87c4e36940f5629d345abcad35
parent965d004af54088d138f806d04d803fb60d441986 (diff)
mm/thp/pagecache/collapse: free the pte page table on collapse for thp page cache.
With THP page cache, when trying to build a huge page from regular pte pages, we just clear the pmd entry. We will take another fault and at that point we will find the huge page in the radix tree, thereby using the huge page to complete the page fault. The second fault path will allocate the needed pgtable_t page for archs like ppc64, so there is no need to deposit the same in the collapse path. Depositing them in the collapse path results in a pgtable_t memory leak, also giving errors like BUG: non-zero nr_ptes on freeing mm: 3 Fixes: 953c66c2b22a ("mm: THP page cache support for ppc64") Link: http://lkml.kernel.org/r/20161212163428.6780-2-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/khugepaged.c21
1 file changed, 2 insertions, 19 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e32389a97030..b0924a68cc36 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1242 struct vm_area_struct *vma; 1242 struct vm_area_struct *vma;
1243 unsigned long addr; 1243 unsigned long addr;
1244 pmd_t *pmd, _pmd; 1244 pmd_t *pmd, _pmd;
1245 bool deposited = false;
1246 1245
1247 i_mmap_lock_write(mapping); 1246 i_mmap_lock_write(mapping);
1248 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1247 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1267 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); 1266 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1268 /* assume page table is clear */ 1267 /* assume page table is clear */
1269 _pmd = pmdp_collapse_flush(vma, addr, pmd); 1268 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1270 /*
1271 * now deposit the pgtable for arch that need it
1272 * otherwise free it.
1273 */
1274 if (arch_needs_pgtable_deposit()) {
1275 /*
1276 * The deposit should be visibile only after
1277 * collapse is seen by others.
1278 */
1279 smp_wmb();
1280 pgtable_trans_huge_deposit(vma->vm_mm, pmd,
1281 pmd_pgtable(_pmd));
1282 deposited = true;
1283 }
1284 spin_unlock(ptl); 1269 spin_unlock(ptl);
1285 up_write(&vma->vm_mm->mmap_sem); 1270 up_write(&vma->vm_mm->mmap_sem);
1286 if (!deposited) { 1271 atomic_long_dec(&vma->vm_mm->nr_ptes);
1287 atomic_long_dec(&vma->vm_mm->nr_ptes); 1272 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1288 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1289 }
1290 } 1273 }
1291 } 1274 }
1292 i_mmap_unlock_write(mapping); 1275 i_mmap_unlock_write(mapping);