author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2011-07-25 20:12:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-25 23:57:08 -0400
commit	6c6d5280431544e4036886ea74e3334a98bc5f96 (patch)
tree	1362944d254ff1dfa609d84a1e4182a77ce3ca07 /mm/pagewalk.c
parent	4b6ddbf7ed4ef2f40e0a27418146eedaa68953c6 (diff)
pagewalk: don't look up vma if walk->hugetlb_entry is unused
Currently, walk_page_range() calls find_vma() on every page table walk iteration, but that is completely unnecessary if walk->hugetlb_entry is unused. And we shouldn't assume find_vma() is a lightweight operation. So this patch checks walk->hugetlb_entry and avoids the find_vma() call where possible.

This patch also makes some cleanups: 1) remove the ugly uninitialized_var() and 2) remove the #ifdef from the function body.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
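For context, here is a minimal sketch of a caller that benefits from this change, assuming the struct mm_walk API of this kernel (pte_entry, mm, private; mmap_sem held by the caller). The count_present_ptes() helper and its count_pte() callback are hypothetical, not part of the patch: because .hugetlb_entry is left NULL, hugetlb_vma() returns NULL immediately and walk_page_range() no longer performs a find_vma() lookup on every pgd iteration.

#include <linux/mm.h>

/* Hypothetical pte_entry callback: counts present ptes in the range. */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

/* Hypothetical helper: .hugetlb_entry is deliberately left NULL, so
 * with this patch the per-pgd find_vma() in walk_page_range() is
 * skipped entirely. */
static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);	/* walk_page_range() needs mmap_sem */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);

	return count;
}

Walkers that do set .hugetlb_entry keep the old behaviour: hugetlb_vma() in the patch below still calls find_vma() under mmap_sem, as asserted by its VM_BUG_ON().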
Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--	mm/pagewalk.c	43
1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 606bbb4125d0..ee4ff87c58c1 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -126,7 +126,39 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 	return 0;
 }
-#endif
+
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma;
+
+	/* We don't need vma lookup at all. */
+	if (!walk->hugetlb_entry)
+		return NULL;
+
+	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+	vma = find_vma(walk->mm, addr);
+	if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+		return vma;
+
+	return NULL;
+}
+
+#else /* CONFIG_HUGETLB_PAGE */
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	return NULL;
+}
+
+static int walk_hugetlb_range(struct vm_area_struct *vma,
+			      unsigned long addr, unsigned long end,
+			      struct mm_walk *walk)
+{
+	return 0;
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
@@ -165,18 +197,17 @@ int walk_page_range(unsigned long addr, unsigned long end,
 
 	pgd = pgd_offset(walk->mm, addr);
 	do {
-		struct vm_area_struct *uninitialized_var(vma);
+		struct vm_area_struct *vma;
 
 		next = pgd_addr_end(addr, end);
 
-#ifdef CONFIG_HUGETLB_PAGE
 		/*
 		 * handle hugetlb vma individually because pagetable walk for
 		 * the hugetlb page is dependent on the architecture and
 		 * we can't handled it in the same manner as non-huge pages.
 		 */
-		vma = find_vma(walk->mm, addr);
-		if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) {
+		vma = hugetlb_vma(addr, walk);
+		if (vma) {
 			if (vma->vm_end < next)
 				next = vma->vm_end;
 			/*
@@ -189,7 +220,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
 			pgd = pgd_offset(walk->mm, next);
 			continue;
 		}
-#endif
+
 		if (pgd_none_or_clear_bad(pgd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);