aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYang Shi <yang.shi@linux.alibaba.com>2018-08-17 18:45:26 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-17 19:20:28 -0400
commitc2231020ea7b53d486395dbd8d3216e0dd1fc7ee (patch)
tree404b8ca035d3c8eecfb4a31bb06e1ff86a67b7a6
parent8cded8668e1f49ab9b90682bca76e861782416e9 (diff)
mm: thp: register mm for khugepaged when merging vma for shmem
When merging anonymous page vma, if the size of the vma can fit in at least one hugepage, the mm will be registered for khugepaged for collapsing THP in the future. But it skips shmem vmas. Do so for shmem also, but not for file-private mappings when merging a vma in order to increase the odds of collapsing a hugepage via khugepaged. hugepage_vma_check() sounds like a good fit to do the check. And move the definition of it before khugepaged_enter_vma_merge() to avoid a build error. Link: http://lkml.kernel.org/r/1529697791-6950-1-git-send-email-yang.shi@linux.alibaba.com Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Hugh Dickins <hughd@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/khugepaged.c53
1 files changed, 26 insertions, 27 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index d7b2a4bf8671..22da712022de 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -397,6 +397,25 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
+static bool hugepage_vma_check(struct vm_area_struct *vma)
+{
+	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vma->vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+		return false;
+	if (shmem_file(vma->vm_file)) {
+		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+			return false;
+		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+				HPAGE_PMD_NR);
+	}
+	if (!vma->anon_vma || vma->vm_ops)
+		return false;
+	if (is_vma_temporary_stack(vma))
+		return false;
+	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
+}
+
 int __khugepaged_enter(struct mm_struct *mm)
 {
 	struct mm_slot *mm_slot;
@@ -434,15 +453,14 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 			       unsigned long vm_flags)
 {
 	unsigned long hstart, hend;
-	if (!vma->anon_vma)
-		/*
-		 * Not yet faulted in so we will register later in the
-		 * page fault if needed.
-		 */
-		return 0;
-	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
-		/* khugepaged not yet working on file or special mappings */
-		return 0;
+
+	/*
+	 * khugepaged does not yet work on non-shmem files or special
+	 * mappings. And file-private shmem THP is not supported.
+	 */
+	if (!hugepage_vma_check(vma))
+		return 0;
+
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -819,25 +837,6 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 }
 #endif
 
-static bool hugepage_vma_check(struct vm_area_struct *vma)
-{
-	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-	    (vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		return false;
-	if (shmem_file(vma->vm_file)) {
-		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
-			return false;
-		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
-				HPAGE_PMD_NR);
-	}
-	if (!vma->anon_vma || vma->vm_ops)
-		return false;
-	if (is_vma_temporary_stack(vma))
-		return false;
-	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
-}
-
 /*
  * If mmap_sem temporarily dropped, revalidate vma
  * before taking mmap_sem.