author      Zi Yan <zi.yan@cs.rutgers.edu>                  2017-09-08 19:11:01 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2017-09-08 21:26:45 -0400
commit      84c3fc4e9c563d8fb91cfdf5948da48fe1af34d3 (patch)
tree        fc028524f9272c5ba1690d18937ebfc0bf0a0f9e /mm/gup.c
parent      616b8371539a6c487404c3b8fb04078016dab4ba (diff)
mm: thp: check pmd migration entry in common path
When THP migration is being used, memory management code needs to handle
pmd migration entries properly. This patch uses !pmd_present() or
is_swap_pmd() (depending on whether pmd_none() needs separate code or
not) to check pmd migration entries at the places where a pmd entry is
present.
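For reference, the checks above lean on two small helpers defined outside mm/gup.c (in include/linux/huge_mm.h and include/linux/swapops.h). The sketch below shows their rough shape as introduced by this series; the exact in-tree definitions may differ in detail:

        /* A pmd that is neither empty nor present carries a swap-style entry. */
        static inline int is_swap_pmd(pmd_t pmd)
        {
                return !pmd_none(pmd) && !pmd_present(pmd);
        }

        /* A non-present pmd whose swap entry encodes a migration target. */
        static inline int is_pmd_migration_entry(pmd_t pmd)
        {
                return !pmd_present(pmd) &&
                       is_migration_entry(pmd_to_swp_entry(pmd));
        }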
Since pmd-related code uses split_huge_page(), split_huge_pmd(),
pmd_trans_huge(), pmd_trans_unstable(), or
pmd_none_or_trans_huge_or_clear_bad(), this patch:
1. adds pmd migration entry split code in split_huge_pmd(),
2. takes care of pmd migration entries whenever pmd_trans_huge() is present,
3. makes pmd_none_or_trans_huge_or_clear_bad() pmd migration entry aware (see the sketch below).
Since split_huge_page() uses split_huge_pmd() and pmd_trans_unstable()
is equivalent to pmd_none_or_trans_huge_or_clear_bad(), we do not change
them.
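Item 3 above lands outside mm/gup.c, in this era's include/asm-generic/pgtable.h. A rough sketch of the migration-aware check (memory-barrier details omitted; the exact in-tree version may differ):

        static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
        {
                pmd_t pmdval = pmd_read_atomic(pmd);

                /*
                 * A non-present pmd under THP migration is a migration entry;
                 * report it like a trans-huge pmd so callers back off and
                 * retry rather than treating it as a bad pmd.
                 */
                if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
                    (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) &&
                     !pmd_present(pmdval)))
                        return 1;
                if (unlikely(pmd_bad(pmdval))) {
                        pmd_clear_bad(pmd);
                        return 1;
                }
                return 0;
        }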
Until this commit, a pmd entry should be:
1. pointing to a pte page,
2. is_swap_pmd(),
3. pmd_trans_huge(),
4. pmd_devmap(), or
5. pmd_none().
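Read as code, those five states are mutually exclusive and can be told apart with the existing predicates. The enum and classify_pmd() below are hypothetical names used only for illustration; they are not kernel code:

        enum pmd_state {
                PMD_STATE_NONE,        /* pmd_none(): nothing mapped */
                PMD_STATE_MIGRATION,   /* is_swap_pmd(): non-present entry (THP migration) */
                PMD_STATE_TRANS_HUGE,  /* pmd_trans_huge(): mapped transparent huge page */
                PMD_STATE_DEVMAP,      /* pmd_devmap(): device-backed huge page */
                PMD_STATE_PTE_TABLE,   /* otherwise: points to a pte page */
        };

        static enum pmd_state classify_pmd(pmd_t pmd)
        {
                if (pmd_none(pmd))
                        return PMD_STATE_NONE;
                if (!pmd_present(pmd))          /* i.e. is_swap_pmd() */
                        return PMD_STATE_MIGRATION;
                if (pmd_trans_huge(pmd))
                        return PMD_STATE_TRANS_HUGE;
                if (pmd_devmap(pmd))
                        return PMD_STATE_DEVMAP;
                return PMD_STATE_PTE_TABLE;
        }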
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/gup.c')
-rw-r--r--  mm/gup.c | 22
1 file changed, 20 insertions(+), 2 deletions(-)
@@ -234,6 +234,16 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                         return page;
                 return no_page_table(vma, flags);
         }
+retry:
+        if (!pmd_present(*pmd)) {
+                if (likely(!(flags & FOLL_MIGRATION)))
+                        return no_page_table(vma, flags);
+                VM_BUG_ON(thp_migration_supported() &&
+                          !is_pmd_migration_entry(*pmd));
+                if (is_pmd_migration_entry(*pmd))
+                        pmd_migration_entry_wait(mm, pmd);
+                goto retry;
+        }
         if (pmd_devmap(*pmd)) {
                 ptl = pmd_lock(mm, pmd);
                 page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -247,7 +257,15 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
         if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
                 return no_page_table(vma, flags);

+retry_locked:
         ptl = pmd_lock(mm, pmd);
+        if (unlikely(!pmd_present(*pmd))) {
+                spin_unlock(ptl);
+                if (likely(!(flags & FOLL_MIGRATION)))
+                        return no_page_table(vma, flags);
+                pmd_migration_entry_wait(mm, pmd);
+                goto retry_locked;
+        }
         if (unlikely(!pmd_trans_huge(*pmd))) {
                 spin_unlock(ptl);
                 return follow_page_pte(vma, address, pmd, flags);
@@ -424,7 +442,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
         pud = pud_offset(p4d, address);
         BUG_ON(pud_none(*pud));
         pmd = pmd_offset(pud, address);
-        if (pmd_none(*pmd))
+        if (!pmd_present(*pmd))
                 return -EFAULT;
         VM_BUG_ON(pmd_trans_huge(*pmd));
         pte = pte_offset_map(pmd, address);
@@ -1534,7 +1552,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                 pmd_t pmd = READ_ONCE(*pmdp);

                 next = pmd_addr_end(addr, end);
-                if (pmd_none(pmd))
+                if (!pmd_present(pmd))
                         return 0;

                 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {