author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>    2015-02-11 18:27:34 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-02-11 20:06:05 -0500
commit     0b1fbfe50006c41014cc25660c0e735d21c34939 (patch)
tree       14c4d882894fdb12dbe7f0f34d01e595aa2b2fba
parent     05fbf357d94152171bc50f8a369390f1f16efd89 (diff)
mm/pagewalk: remove pgd_entry() and pud_entry()
Currently no user of the page table walker sets ->pgd_entry() or ->pud_entry(),
so checking for their existence in each loop iteration just wastes CPU cycles.
Remove them to reduce overhead.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
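To make the rationale concrete, below is a minimal, purely illustrative sketch of how callers used walk_page_range() at this point in the tree. The helper names (count_pte, count_present_ptes) and the counting logic are hypothetical, not taken from any in-tree user; the point is the shape: such users populate only ->pte_entry and/or ->pmd_entry, so the ->pgd_entry/->pud_entry checks removed by this patch never find a callback to call.

/*
 * Illustrative sketch only (not part of this patch): a typical
 * walk_page_range() user of this era.  count_pte() and
 * count_present_ptes() are hypothetical names.
 */
#include <linux/mm.h>

/* ->pte_entry callback: invoked for every PTE in the walked range. */
static int count_pte(pte_t *pte, unsigned long addr,
                     unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte,
                .mm        = mm,
                .private   = &count,
        };

        /* walk_page_range() expects mmap_sem to be held. */
        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);

        return count;
}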
-rw-r--r--  include/linux/mm.h  6
-rw-r--r--  mm/pagewalk.c       9
2 files changed, 2 insertions, 13 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3696b3bd1d7e..f6106d3f3dab 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1164,8 +1164,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 
 /**
  * mm_walk - callbacks for walk_page_range
- * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
  *             this handler is required to be able to handle
  *             pmd_trans_huge() pmds. They may simply choose to
@@ -1179,10 +1177,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  * (see walk_page_range for more details)
  */
 struct mm_walk {
-        int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
-                         unsigned long next, struct mm_walk *walk);
-        int (*pud_entry)(pud_t *pud, unsigned long addr,
-                         unsigned long next, struct mm_walk *walk);
         int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
                          unsigned long next, struct mm_walk *walk);
         int (*pte_entry)(pte_t *pte, unsigned long addr,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b264bda46e1b..b793ef149da2 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -86,9 +86,7 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                                 break;
                         continue;
                 }
-                if (walk->pud_entry)
-                        err = walk->pud_entry(pud, addr, next, walk);
-                if (!err && (walk->pmd_entry || walk->pte_entry))
+                if (walk->pmd_entry || walk->pte_entry)
                         err = walk_pmd_range(pud, addr, next, walk);
                 if (err)
                         break;
@@ -237,10 +235,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
                         pgd++;
                         continue;
                 }
-                if (walk->pgd_entry)
-                        err = walk->pgd_entry(pgd, addr, next, walk);
-                if (!err &&
-                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
+                if (walk->pmd_entry || walk->pte_entry)
                         err = walk_pud_range(pgd, addr, next, walk);
                 if (err)
                         break;