aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEbru Akagunduz <ebru.akagunduz@gmail.com>2016-09-19 17:44:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-09-19 18:36:16 -0400
commit982785c6b05a82c01e90687b7e25ee87c8970b2e (patch)
treed855120e0185c802dcb3c222fc94abe31a279b3c
parentc131f751ab1a852d4dd4b490b3a7fbba7d738de5 (diff)
mm, thp: fix leaking mapped pte in __collapse_huge_page_swapin()
Currently, khugepaged does not permit swapin if there are enough young
pages in a THP.  The problem is when a THP does not have enough young
pages, khugepaged leaks mapped ptes.

This patch prohibits leaking mapped ptes.

Link: http://lkml.kernel.org/r/1472820276-7831-1-git-send-email-ebru.akagunduz@gmail.com
Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/khugepaged.c10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 62339bf3c726..728d7790dc2d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -882,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
882 .pmd = pmd, 882 .pmd = pmd,
883 }; 883 };
884 884
885 /* we only decide to swapin, if there is enough young ptes */
886 if (referenced < HPAGE_PMD_NR/2) {
887 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
888 return false;
889 }
885 fe.pte = pte_offset_map(pmd, address); 890 fe.pte = pte_offset_map(pmd, address);
886 for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; 891 for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
887 fe.pte++, fe.address += PAGE_SIZE) { 892 fe.pte++, fe.address += PAGE_SIZE) {
@@ -889,11 +894,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
889 if (!is_swap_pte(pteval)) 894 if (!is_swap_pte(pteval))
890 continue; 895 continue;
891 swapped_in++; 896 swapped_in++;
892 /* we only decide to swapin, if there is enough young ptes */
893 if (referenced < HPAGE_PMD_NR/2) {
894 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
895 return false;
896 }
897 ret = do_swap_page(&fe, pteval); 897 ret = do_swap_page(&fe, pteval);
898 898
899 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ 899 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */