author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2015-02-11 18:25:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:06:01 -0500
commit	cbef8478bee55775ac312a574aad48af7bb9cf9f (patch)
tree	499822b6d58e7239c4266bb940e73b47de8280ce
parent	61f77eda9bbf0d2e922197ed2dcf88638a639ce5 (diff)
mm/hugetlb: pmd_huge() returns true for non-present hugepage
Migrating hugepages and hwpoisoned hugepages are considered non-present hugepages: they are referenced via migration entries and hwpoison entries in their page table slots.

This behavior causes a race condition because pmd_huge() doesn't distinguish non-huge pages from migrating/hwpoisoned hugepages. follow_page_mask() is one example where the kernel would call follow_page_pte() for such a hugepage even though that function is supposed to handle only normal pages.

To avoid this, this patch makes pmd_huge() return true when the pmd is non-empty (pmd_none() is false) and pmd_present() is false. We don't have to worry about mixing up a non-present pmd entry with a normal pmd (pointing to a leaf-level pte entry) because pmd_present() is true for a normal pmd.

The same race condition could happen in the (x86-specific) gup_pmd_range(), where this patch simply adds a pmd_present() check instead of calling pmd_huge(), because gup_pmd_range() is a fast path. If we hit a non-present hugepage in this function, we go into gup_huge_pmd(), return 0 at the flag-mask check, and finally fall back to the slow path.

Fixes: 290408d4a2 ("hugetlb: hugepage migration core")
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: <stable@vger.kernel.org> [2.6.36+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
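For readers who want to see the new predicate in isolation, below is a minimal user-space sketch (not kernel code) of the x86 pmd_huge() logic. It assumes _PAGE_PRESENT is bit 0 and _PAGE_PSE is bit 7 as on x86, reduces pmd_none() to a zero test, and uses made-up bit patterns for the three interesting cases: a normal pmd pointing to a pte page, a present huge pmd, and a non-present (migration-style) hugetlb entry.

/*
 * User-space model of the new pmd_huge() check (illustration only).
 * _PAGE_PRESENT/_PAGE_PSE match the x86 bit positions; the sample
 * entries in main() are hypothetical values, not real page table bits.
 */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1ULL << 0)	/* entry is present */
#define _PAGE_PSE	(1ULL << 7)	/* entry maps a huge page */

typedef uint64_t pmd_val_t;

static int pmd_none(pmd_val_t pmd)    { return pmd == 0; }
static int pmd_present(pmd_val_t pmd) { return (pmd & _PAGE_PRESENT) != 0; }

/*
 * New behaviour: any non-empty entry that is not a normal (present,
 * non-PSE) pmd is treated as hugetlb related, which covers both present
 * huge pmds and non-present migration/hwpoison hugetlb entries.
 */
static int pmd_huge(pmd_val_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd & (_PAGE_PRESENT | _PAGE_PSE)) != _PAGE_PRESENT;
}

int main(void)
{
	pmd_val_t normal_pmd    = _PAGE_PRESENT | 0x1000;		/* points to a pte page */
	pmd_val_t huge_pmd      = _PAGE_PRESENT | _PAGE_PSE | 0x200000;	/* present huge mapping */
	pmd_val_t migration_pmd = 0x2aULL << 9;				/* non-present, non-empty */

	printf("normal    pmd_huge() = %d\n", pmd_huge(normal_pmd));	/* 0 */
	printf("huge      pmd_huge() = %d\n", pmd_huge(huge_pmd));	/* 1 */
	printf("migration pmd_huge() = %d\n", pmd_huge(migration_pmd));	/* 1 */
	printf("migration pmd_present() = %d\n", pmd_present(migration_pmd)); /* 0: fast GUP falls back */
	return 0;
}

With the old check, !!(pmd_val(pmd) & _PAGE_PSE), a non-present hugetlb entry without _PAGE_PSE set was reported as not huge, which is what let follow_page_mask() treat it as a normal page.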
-rw-r--r--	arch/x86/mm/gup.c	2
-rw-r--r--	arch/x86/mm/hugetlbpage.c	8
-rw-r--r--	mm/hugetlb.c	2
3 files changed, 10 insertions, 2 deletions
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index d7547824e763..224b14235e96 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 	 */
 	if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 		return 0;
-	if (unlikely(pmd_large(pmd))) {
+	if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
 		/*
 		 * NUMA hinting faults need to be handled in the GUP
 		 * slowpath for accounting purposes and so that they
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f48423f10141..42982b26e32b 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -54,9 +54,15 @@ int pud_huge(pud_t pud)
 
 #else
 
+/*
+ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
+ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
+ * Otherwise, returns 0.
+ */
 int pmd_huge(pmd_t pmd)
 {
-	return !!(pmd_val(pmd) & _PAGE_PSE);
+	return !pmd_none(pmd) &&
+		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
 }
 
 int pud_huge(pud_t pud)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f533d336e569..d96b8bfa748f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3679,6 +3679,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	struct page *page;
 
+	if (!pmd_present(*pmd))
+		return NULL;
 	page = pte_page(*(pte_t *)pmd);
 	if (page)
 		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);