aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2010-05-24 17:32:11 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-25 11:06:58 -0400
commite48293fd75b3aa67f43ad6e3d2ff397caa55d58b (patch)
tree8f163687833fb5e1fa84f72e2e72e1ab83aca91e /mm
parent25ef0e50cca790370ad7838e3ad74db6a6a2d829 (diff)
mincore: do nested page table walks
Do page table walks with the well-known nested loops we use in several other places already.

This avoids doing full page table walks after every pte range and also allows to handle unmapped areas bigger than one pte range in one go.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/mincore.c75
1 file changed, 58 insertions(+), 17 deletions(-)
diff --git a/mm/mincore.c b/mm/mincore.c
index 211604adc23c..9ac42dc6d7b6 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -144,6 +144,60 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
144 pte_unmap_unlock(ptep - 1, ptl); 144 pte_unmap_unlock(ptep - 1, ptl);
145} 145}
146 146
147static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
148 unsigned long addr, unsigned long end,
149 unsigned char *vec)
150{
151 unsigned long next;
152 pmd_t *pmd;
153
154 pmd = pmd_offset(pud, addr);
155 do {
156 next = pmd_addr_end(addr, end);
157 if (pmd_none_or_clear_bad(pmd))
158 mincore_unmapped_range(vma, addr, next, vec);
159 else
160 mincore_pte_range(vma, pmd, addr, next, vec);
161 vec += (next - addr) >> PAGE_SHIFT;
162 } while (pmd++, addr = next, addr != end);
163}
164
165static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
166 unsigned long addr, unsigned long end,
167 unsigned char *vec)
168{
169 unsigned long next;
170 pud_t *pud;
171
172 pud = pud_offset(pgd, addr);
173 do {
174 next = pud_addr_end(addr, end);
175 if (pud_none_or_clear_bad(pud))
176 mincore_unmapped_range(vma, addr, next, vec);
177 else
178 mincore_pmd_range(vma, pud, addr, next, vec);
179 vec += (next - addr) >> PAGE_SHIFT;
180 } while (pud++, addr = next, addr != end);
181}
182
183static void mincore_page_range(struct vm_area_struct *vma,
184 unsigned long addr, unsigned long end,
185 unsigned char *vec)
186{
187 unsigned long next;
188 pgd_t *pgd;
189
190 pgd = pgd_offset(vma->vm_mm, addr);
191 do {
192 next = pgd_addr_end(addr, end);
193 if (pgd_none_or_clear_bad(pgd))
194 mincore_unmapped_range(vma, addr, next, vec);
195 else
196 mincore_pud_range(vma, pgd, addr, next, vec);
197 vec += (next - addr) >> PAGE_SHIFT;
198 } while (pgd++, addr = next, addr != end);
199}
200
147/* 201/*
148 * Do a chunk of "sys_mincore()". We've already checked 202 * Do a chunk of "sys_mincore()". We've already checked
149 * all the arguments, we hold the mmap semaphore: we should 203 * all the arguments, we hold the mmap semaphore: we should
@@ -151,9 +205,6 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
151 */ 205 */
152static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) 206static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
153{ 207{
154 pgd_t *pgd;
155 pud_t *pud;
156 pmd_t *pmd;
157 struct vm_area_struct *vma; 208 struct vm_area_struct *vma;
158 unsigned long end; 209 unsigned long end;
159 210
@@ -170,21 +221,11 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
170 221
171 end = pmd_addr_end(addr, end); 222 end = pmd_addr_end(addr, end);
172 223
173 pgd = pgd_offset(vma->vm_mm, addr); 224 if (is_vm_hugetlb_page(vma))
174 if (pgd_none_or_clear_bad(pgd)) 225 mincore_hugetlb_page_range(vma, addr, end, vec);
175 goto none_mapped; 226 else
176 pud = pud_offset(pgd, addr); 227 mincore_page_range(vma, addr, end, vec);
177 if (pud_none_or_clear_bad(pud))
178 goto none_mapped;
179 pmd = pmd_offset(pud, addr);
180 if (pmd_none_or_clear_bad(pmd))
181 goto none_mapped;
182
183 mincore_pte_range(vma, pmd, addr, end, vec);
184 return (end - addr) >> PAGE_SHIFT;
185 228
186none_mapped:
187 mincore_unmapped_range(vma, addr, end, vec);
188 return (end - addr) >> PAGE_SHIFT; 229 return (end - addr) >> PAGE_SHIFT;
189} 230}
190 231