about summary refs log tree commit diff stats
path: root/mm/pagewalk.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--  mm/pagewalk.c  59
1 file changed, 58 insertions(+), 1 deletion(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index d5878bed7841..8b1a2ce21ee5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1,6 +1,7 @@
1#include <linux/mm.h> 1#include <linux/mm.h>
2#include <linux/highmem.h> 2#include <linux/highmem.h>
3#include <linux/sched.h> 3#include <linux/sched.h>
4#include <linux/hugetlb.h>
4 5
5static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 6static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
6 struct mm_walk *walk) 7 struct mm_walk *walk)
@@ -79,6 +80,37 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
79 return err; 80 return err;
80} 81}
81 82
83#ifdef CONFIG_HUGETLB_PAGE
/*
 * hugetlb_entry_end - clamp the end of the current hugepage to @end.
 *
 * The hugetlb walk advances one hugepage at a time: the next stop is
 * either the boundary of the hugepage containing @addr (per the page
 * size of hstate @h) or the overall walk end, whichever comes first.
 */
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	if (boundary < end)
		return boundary;
	return end;
}
90
91static int walk_hugetlb_range(struct vm_area_struct *vma,
92 unsigned long addr, unsigned long end,
93 struct mm_walk *walk)
94{
95 struct hstate *h = hstate_vma(vma);
96 unsigned long next;
97 unsigned long hmask = huge_page_mask(h);
98 pte_t *pte;
99 int err = 0;
100
101 do {
102 next = hugetlb_entry_end(h, addr, end);
103 pte = huge_pte_offset(walk->mm, addr & hmask);
104 if (pte && walk->hugetlb_entry)
105 err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
106 if (err)
107 return err;
108 } while (addr = next, addr != end);
109
110 return 0;
111}
112#endif
113
82/** 114/**
83 * walk_page_range - walk a memory map's page tables with a callback 115 * walk_page_range - walk a memory map's page tables with a callback
84 * @mm: memory map to walk 116 * @mm: memory map to walk
@@ -107,6 +139,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
107 pgd_t *pgd; 139 pgd_t *pgd;
108 unsigned long next; 140 unsigned long next;
109 int err = 0; 141 int err = 0;
142 struct vm_area_struct *vma;
110 143
111 if (addr >= end) 144 if (addr >= end)
112 return err; 145 return err;
@@ -117,11 +150,34 @@ int walk_page_range(unsigned long addr, unsigned long end,
117 pgd = pgd_offset(walk->mm, addr); 150 pgd = pgd_offset(walk->mm, addr);
118 do { 151 do {
119 next = pgd_addr_end(addr, end); 152 next = pgd_addr_end(addr, end);
153
154 /*
155 * handle hugetlb vma individually because pagetable walk for
156 * the hugetlb page is dependent on the architecture and
157	 * we can't handle it in the same manner as non-huge pages.
158 */
159 vma = find_vma(walk->mm, addr);
160#ifdef CONFIG_HUGETLB_PAGE
161 if (vma && is_vm_hugetlb_page(vma)) {
162 if (vma->vm_end < next)
163 next = vma->vm_end;
164 /*
165 * Hugepage is very tightly coupled with vma, so
166 * walk through hugetlb entries within a given vma.
167 */
168 err = walk_hugetlb_range(vma, addr, next, walk);
169 if (err)
170 break;
171 pgd = pgd_offset(walk->mm, next);
172 continue;
173 }
174#endif
120 if (pgd_none_or_clear_bad(pgd)) { 175 if (pgd_none_or_clear_bad(pgd)) {
121 if (walk->pte_hole) 176 if (walk->pte_hole)
122 err = walk->pte_hole(addr, next, walk); 177 err = walk->pte_hole(addr, next, walk);
123 if (err) 178 if (err)
124 break; 179 break;
180 pgd++;
125 continue; 181 continue;
126 } 182 }
127 if (walk->pgd_entry) 183 if (walk->pgd_entry)
@@ -131,7 +187,8 @@ int walk_page_range(unsigned long addr, unsigned long end,
131 err = walk_pud_range(pgd, addr, next, walk); 187 err = walk_pud_range(pgd, addr, next, walk);
132 if (err) 188 if (err)
133 break; 189 break;
134 } while (pgd++, addr = next, addr != end); 190 pgd++;
191 } while (addr = next, addr != end);
135 192
136 return err; 193 return err;
137} 194}