author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>      2015-02-11 18:27:48 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-11 20:06:05 -0500
commit     f995ece24dfecb3614468befbe4e6e777b854cc0 (patch)
tree       f9c4336bf189e6ebd48b6fd49913cfd5293e2e47 /fs
parent     5c64f52acdbc615e3ef58692f42ee00b83d0225d (diff)
pagemap: use walk->vma instead of calling find_vma()
The page table walker keeps the current vma in mm_walk (walk->vma), so we no longer have to call find_vma() in each pagemap_(pte|hugetlb)_range() call. Currently pagemap_pte_range() does the vma loop itself, so this patch removes many lines of code.

The NULL-vma check is omitted because we assume that these callbacks never run on an address outside a vma. And even if that assumption were broken, the resulting NULL pointer dereference would be detected, so we would still get enough information for debugging.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
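To make the refactoring concrete, the following is a minimal user-space C sketch (not kernel code) of the pattern this patch relies on: the walker resolves the current region once, records it in the walk context, and only invokes the callback for ranges inside that region, so the callback no longer needs its own find_vma() call, NULL check, or hole handling. The names here (struct region, struct walk_ctx, walk_ranges, dump_range) are hypothetical stand-ins for vm_area_struct, mm_walk, walk_page_range() and pagemap_pte_range().

/* Sketch only: stand-ins for the kernel structures named above. */
#include <stdio.h>

struct region {                 /* stand-in for vm_area_struct */
        unsigned long start, end;
};

struct walk_ctx {               /* stand-in for mm_walk */
        struct region *cur;     /* like walk->vma: filled in by the walker */
        void *private_data;     /* like walk->private */
};

/*
 * Callback in the new style: it can trust ctx->cur, just as
 * pagemap_pte_range() now trusts walk->vma, and it is only called for
 * [addr, end) inside that region, so no hole or NULL handling is needed.
 */
static int dump_range(unsigned long addr, unsigned long end,
                      struct walk_ctx *ctx)
{
        printf("region [%#lx, %#lx): callback for [%#lx, %#lx)\n",
               ctx->cur->start, ctx->cur->end, addr, end);
        return 0;
}

/*
 * Walker in the style of walk_page_range(): it looks up the region once,
 * stores it in the context, and clamps the callback range to the region.
 */
static int walk_ranges(struct region *regions, int nr,
                       unsigned long start, unsigned long end,
                       struct walk_ctx *ctx)
{
        for (int i = 0; i < nr; i++) {
                unsigned long lo = regions[i].start > start ? regions[i].start : start;
                unsigned long hi = regions[i].end < end ? regions[i].end : end;
                int err;

                if (lo >= hi)
                        continue;
                ctx->cur = &regions[i];   /* the one lookup the whole walk needs */
                err = dump_range(lo, hi, ctx);
                if (err)
                        return err;
        }
        return 0;
}

int main(void)
{
        struct region regions[] = { { 0x1000, 0x3000 }, { 0x5000, 0x8000 } };
        struct walk_ctx ctx = { 0 };

        return walk_ranges(regions, 2, 0x0, 0x9000, &ctx);
}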
Diffstat (limited to 'fs')
-rw-r--r--  fs/proc/task_mmu.c  68
1 file changed, 14 insertions(+), 54 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index bed0834715a5..4206706dd92a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1047,15 +1047,13 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
 {
-        struct vm_area_struct *vma;
+        struct vm_area_struct *vma = walk->vma;
         struct pagemapread *pm = walk->private;
         spinlock_t *ptl;
         pte_t *pte, *orig_pte;
         int err = 0;
 
-        /* find the first VMA at or above 'addr' */
-        vma = find_vma(walk->mm, addr);
-        if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                 int pmd_flags2;
 
                 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1081,55 +1079,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
         if (pmd_trans_unstable(pmd))
                 return 0;
 
-        while (1) {
-                /* End of address space hole, which we mark as non-present. */
-                unsigned long hole_end;
-
-                if (vma)
-                        hole_end = min(end, vma->vm_start);
-                else
-                        hole_end = end;
-
-                for (; addr < hole_end; addr += PAGE_SIZE) {
-                        pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
-                        err = add_to_pagemap(addr, &pme, pm);
-                        if (err)
-                                return err;
-                }
-
-                if (!vma || vma->vm_start >= end)
-                        break;
-                /*
-                 * We can't possibly be in a hugetlb VMA. In general,
-                 * for a mm_walk with a pmd_entry and a hugetlb_entry,
-                 * the pmd_entry can only be called on addresses in a
-                 * hugetlb if the walk starts in a non-hugetlb VMA and
-                 * spans a hugepage VMA. Since pagemap_read walks are
-                 * PMD-sized and PMD-aligned, this will never be true.
-                 */
-                BUG_ON(is_vm_hugetlb_page(vma));
-
-                /* Addresses in the VMA. */
-                orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-                for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
-                        pagemap_entry_t pme;
-
-                        pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-                        err = add_to_pagemap(addr, &pme, pm);
-                        if (err)
-                                break;
-                }
-                pte_unmap_unlock(orig_pte, ptl);
-
+        /*
+         * We can assume that @vma always points to a valid one and @end never
+         * goes beyond vma->vm_end.
+         */
+        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+        for (; addr < end; pte++, addr += PAGE_SIZE) {
+                pagemap_entry_t pme;
+
+                pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+                err = add_to_pagemap(addr, &pme, pm);
                 if (err)
-                        return err;
-
-                if (addr == end)
-                        break;
-
-                vma = find_vma(walk->mm, addr);
-        }
+                        break;
+        }
+        pte_unmap_unlock(orig_pte, ptl);
 
         cond_resched();
 
@@ -1155,15 +1118,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
                                  struct mm_walk *walk)
 {
         struct pagemapread *pm = walk->private;
-        struct vm_area_struct *vma;
+        struct vm_area_struct *vma = walk->vma;
         int err = 0;
         int flags2;
         pagemap_entry_t pme;
 
-        vma = find_vma(walk->mm, addr);
-        WARN_ON_ONCE(!vma);
-
-        if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+        if (vma->vm_flags & VM_SOFTDIRTY)
                 flags2 = __PM_SOFT_DIRTY;
         else
                 flags2 = 0;
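For context, pagemap_pte_range() and pagemap_hugetlb_range() fill the 64-bit entries that user space reads from /proc/<pid>/pagemap. The stand-alone program below is an editor's sketch of that interface, not part of the patch; the bit layout follows Documentation/vm/pagemap.txt from this era (bit 63 present, bit 62 swapped, bit 55 soft-dirty, bits 0-54 PFN), and it assumes /proc/self/pagemap is readable by the calling process.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        uint64_t entry;
        off_t offset;
        int fd;

        /* Touch one page of heap memory so it is likely present. */
        unsigned char *buf = malloc(page_size);
        if (!buf)
                return 1;
        buf[0] = 1;

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* One 64-bit entry per virtual page, indexed by page number. */
        offset = ((uintptr_t)buf / page_size) * sizeof(entry);
        if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
                perror("pread");
                close(fd);
                return 1;
        }

        /*
         * bit 63 = present, bit 62 = swapped, bit 55 = soft-dirty,
         * bits 0-54 = PFN when present (may be reported as zero to
         * unprivileged readers on newer kernels).
         */
        printf("addr %p: present=%d swapped=%d soft-dirty=%d pfn=0x%llx\n",
               (void *)buf,
               (int)((entry >> 63) & 1),
               (int)((entry >> 62) & 1),
               (int)((entry >> 55) & 1),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));

        close(fd);
        free(buf);
        return 0;
}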