aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHuang Ying <ying.huang@intel.com>2017-11-02 18:59:34 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-03 10:39:19 -0400
commitb83d7e432399d44454411dec5c25afb5c4469e96 (patch)
tree14e38018a632c814c4910dc770f889d2e40f4e9c
parent1e3921471354244f70fe268586ff94a97a6dd4df (diff)
mm, /proc/pid/pagemap: fix soft dirty marking for PMD migration entry
When the pagetable is walked in the implementation of /proc/<pid>/pagemap, pmd_soft_dirty() is used for both the PMD huge page map and the PMD migration entries. That is wrong, pmd_swp_soft_dirty() should be used for the PMD migration entries instead because the different page table entry flag is used. As a result, /proc/pid/pagemap may report incorrect soft dirty information for PMD migration entries. Link: http://lkml.kernel.org/r/20171017081818.31795-1-ying.huang@intel.com Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path") Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Michal Hocko <mhocko@suse.com> Cc: David Rientjes <rientjes@google.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Hugh Dickins <hughd@google.com> Cc: "Jérôme Glisse" <jglisse@redhat.com> Cc: Daniel Colascione <dancol@google.com> Cc: Zi Yan <zi.yan@cs.rutgers.edu> Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--fs/proc/task_mmu.c6
1 files changed, 5 insertions, 1 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 280282b05bc7..6744bd706ecf 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1311,13 +1311,15 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	pmd_t pmd = *pmdp;
 	struct page *page = NULL;
 
-	if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
+	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags |= PM_SOFT_DIRTY;
 
 	if (pmd_present(pmd)) {
 		page = pmd_page(pmd);
 
 		flags |= PM_PRESENT;
+		if (pmd_soft_dirty(pmd))
+			flags |= PM_SOFT_DIRTY;
 		if (pm->show_pfn)
 			frame = pmd_pfn(pmd) +
 				((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -1329,6 +1331,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 		flags |= PM_SWAP;
+		if (pmd_swp_soft_dirty(pmd))
+			flags |= PM_SOFT_DIRTY;
 		VM_BUG_ON(!is_pmd_migration_entry(pmd));
 		page = migration_entry_to_page(entry);
 	}