author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>     2015-02-11 18:28:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 20:06:06 -0500
commit     1e25a271c8ac1c9faebf4eb3fa609189e4e7b1b6 (patch)
tree       bed90c6a138544e00ec6220737ba7516e6705240 /mm
parent     7d5b3bfaa2da150ce2dc45546f2125b854f962ef (diff)
mincore: apply page table walker on do_mincore()
This patch makes do_mincore() use walk_page_vma(), which reduces many
lines of code by using common page table walk code.

[daeseok.youn@gmail.com: remove unneeded variable 'err']
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Daeseok Youn <daeseok.youn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
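[Editor's note] For context on what the refactored path computes: the vec that
do_mincore() fills is the per-page residency vector returned to userspace by
the mincore(2) syscall. A minimal userspace sketch (not part of this patch;
the file name and demo values are illustrative) that exercises that path:

/*
 * Minimal userspace sketch (not part of this patch): calls mincore(2),
 * whose kernel side is do_mincore(), and prints one residency byte per
 * page of an anonymous private mapping.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t npages = 8;
	size_t len = npages * page;
	unsigned char vec[8];

	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Fault in the first half of the mapping. */
	memset(buf, 1, len / 2);

	/* The kernel fills vec[] one byte per page of the range. */
	if (mincore(buf, len, vec)) {
		perror("mincore");
		return 1;
	}

	for (size_t i = 0; i < npages; i++)
		printf("page %zu: %s\n", i,
		       (vec[i] & 1) ? "resident" : "not resident");

	munmap(buf, len);
	return 0;
}

With this patch applied, the kernel-side work behind such a call is dispatched
through the .pmd_entry, .pte_hole and .hugetlb_entry callbacks registered on
struct mm_walk in do_mincore(), rather than through the hand-rolled
pgd/pud/pmd loops removed below.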
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  |  20
-rw-r--r--  mm/mincore.c      | 166
2 files changed, 60 insertions, 126 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0531ea7dd7cf..29bc6e471df4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1412,26 +1412,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
-int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, unsigned long end,
-		unsigned char *vec)
-{
-	spinlock_t *ptl;
-	int ret = 0;
-
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		/*
-		 * All logical pages in the range are present
-		 * if backed by a huge page.
-		 */
-		spin_unlock(ptl);
-		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-		ret = 1;
-	}
-
-	return ret;
-}
-
 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		  unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
diff --git a/mm/mincore.c b/mm/mincore.c
index 46527c023e0c..be25efde64a4 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -19,38 +19,25 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long end,
-				unsigned char *vec)
+static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
+			unsigned long end, struct mm_walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	struct hstate *h;
+	unsigned char present;
+	unsigned char *vec = walk->private;
 
-	h = hstate_vma(vma);
-	while (1) {
-		unsigned char present;
-		pte_t *ptep;
-		/*
-		 * Huge pages are always in RAM for now, but
-		 * theoretically it needs to be checked.
-		 */
-		ptep = huge_pte_offset(current->mm,
-				       addr & huge_page_mask(h));
-		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
-		while (1) {
-			*vec = present;
-			vec++;
-			addr += PAGE_SIZE;
-			if (addr == end)
-				return;
-			/* check hugepage border */
-			if (!(addr & ~huge_page_mask(h)))
-				break;
-		}
-	}
+	/*
+	 * Hugepages under user process are always in RAM and never
+	 * swapped out, but theoretically it needs to be checked.
+	 */
+	present = pte && !huge_pte_none(huge_ptep_get(pte));
+	for (; addr != end; vec++, addr += PAGE_SIZE)
+		*vec = present;
+	walk->private = vec;
 #else
 	BUG();
 #endif
+	return 0;
 }
 
 /*
@@ -94,9 +81,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	return present;
 }
 
-static void mincore_unmapped_range(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+			struct vm_area_struct *vma, unsigned char *vec)
 {
 	unsigned long nr = (end - addr) >> PAGE_SHIFT;
 	int i;
@@ -111,23 +97,44 @@ static void mincore_unmapped_range(struct vm_area_struct *vma,
 		for (i = 0; i < nr; i++)
 			vec[i] = 0;
 	}
+	return nr;
+}
+
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+			struct mm_walk *walk)
+{
+	walk->private += __mincore_unmapped_range(addr, end,
+						  walk->vma, walk->private);
+	return 0;
 }
 
-static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
+static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			struct mm_walk *walk)
 {
-	unsigned long next;
 	spinlock_t *ptl;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *ptep;
+	unsigned char *vec = walk->private;
+	int nr = (end - addr) >> PAGE_SHIFT;
+
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+		memset(vec, 1, nr);
+		spin_unlock(ptl);
+		goto out;
+	}
+
+	if (pmd_trans_unstable(pmd)) {
+		__mincore_unmapped_range(addr, end, vma, vec);
+		goto out;
+	}
 
-	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	do {
+	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 		pte_t pte = *ptep;
 
-		next = addr + PAGE_SIZE;
 		if (pte_none(pte))
-			mincore_unmapped_range(vma, addr, next, vec);
+			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
+						 vma, vec);
 		else if (pte_present(pte))
 			*vec = 1;
 		else { /* pte is a swap entry */
@@ -150,69 +157,12 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			}
 		}
 		vec++;
-	} while (ptep++, addr = next, addr != end);
+	}
 	pte_unmap_unlock(ptep - 1, ptl);
-}
-
-static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pmd_t *pmd;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_trans_huge(*pmd)) {
-			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
-				vec += (next - addr) >> PAGE_SHIFT;
-				continue;
-			}
-			/* fall through */
-		}
-		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pte_range(vma, pmd, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pmd++, addr = next, addr != end);
-}
-
-static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pud_t *pud;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pmd_range(vma, pud, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pud++, addr = next, addr != end);
-}
-
-static void mincore_page_range(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pgd_t *pgd;
-
-	pgd = pgd_offset(vma->vm_mm, addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pud_range(vma, pgd, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pgd++, addr = next, addr != end);
+out:
+	walk->private += nr;
+	cond_resched();
+	return 0;
 }
 
 /*
@@ -224,18 +174,22 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
 {
 	struct vm_area_struct *vma;
 	unsigned long end;
+	int err;
+	struct mm_walk mincore_walk = {
+		.pmd_entry = mincore_pte_range,
+		.pte_hole = mincore_unmapped_range,
+		.hugetlb_entry = mincore_hugetlb,
+		.private = vec,
+	};
 
 	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
-
+	mincore_walk.mm = vma->vm_mm;
 	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
-
-	if (is_vm_hugetlb_page(vma))
-		mincore_hugetlb_page_range(vma, addr, end, vec);
-	else
-		mincore_page_range(vma, addr, end, vec);
-
+	err = walk_page_range(addr, end, &mincore_walk);
+	if (err < 0)
+		return err;
 	return (end - addr) >> PAGE_SHIFT;
 }
 