author	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-26 00:00:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-26 00:00:19 -0400
commit	45b583b10a8b438b970e95a7d1d4db22c9e35004 (patch)
tree	14fa481598289df0459580c582b48a9d95db51f6 /mm/pagewalk.c
parent	154dd78d30b56ffb8b447f629bfcceb14150e5c4 (diff)
parent	f19da2ce8ef5e49b8b8ea199c3601dd45d71b262 (diff)
Merge 'akpm' patch series
* Merge akpm patch series: (122 commits)
  drivers/connector/cn_proc.c: remove unused local
  Documentation/SubmitChecklist: add RCU debug config options
  reiserfs: use hweight_long()
  reiserfs: use proper little-endian bitops
  pnpacpi: register disabled resources
  drivers/rtc/rtc-tegra.c: properly initialize spinlock
  drivers/rtc/rtc-twl.c: check return value of twl_rtc_write_u8() in twl_rtc_set_time()
  drivers/rtc: add support for Qualcomm PMIC8xxx RTC
  drivers/rtc/rtc-s3c.c: support clock gating
  drivers/rtc/rtc-mpc5121.c: add support for RTC on MPC5200
  init: skip calibration delay if previously done
  misc/eeprom: add eeprom access driver for digsy_mtc board
  misc/eeprom: add driver for microwire 93xx46 EEPROMs
  checkpatch.pl: update $logFunctions
  checkpatch: make utf-8 test --strict
  checkpatch.pl: add ability to ignore various messages
  checkpatch: add a "prefer __aligned" check
  checkpatch: validate signature styles and To: and Cc: lines
  checkpatch: add __rcu as a sparse modifier
  checkpatch: suggest using min_t or max_t
  ...

Did this as a merge because of (trivial) conflicts in
  - Documentation/feature-removal-schedule.txt
  - arch/xtensa/include/asm/uaccess.h
that were just easier to fix up in the merge than in the patch series.
Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--	mm/pagewalk.c	49
1 files changed, 42 insertions, 7 deletions
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index c3450d533611..2f5cf10ff660 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -126,7 +126,39 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 	return 0;
 }
-#endif
+
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma;
+
+	/* We don't need vma lookup at all. */
+	if (!walk->hugetlb_entry)
+		return NULL;
+
+	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+	vma = find_vma(walk->mm, addr);
+	if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+		return vma;
+
+	return NULL;
+}
+
+#else /* CONFIG_HUGETLB_PAGE */
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	return NULL;
+}
+
+static int walk_hugetlb_range(struct vm_area_struct *vma,
+			      unsigned long addr, unsigned long end,
+			      struct mm_walk *walk)
+{
+	return 0;
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
@@ -144,11 +176,15 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
  * associated range, and a copy of the original mm_walk for access to
  * the ->private or ->mm fields.
  *
- * No locks are taken, but the bottom level iterator will map PTE
+ * Usually no locks are taken, but splitting transparent huge page may
+ * take page table lock. And the bottom level iterator will map PTE
  * directories from highmem if necessary.
  *
  * If any callback returns a non-zero value, the walk is aborted and
  * the return value is propagated back to the caller. Otherwise 0 is returned.
+ *
+ * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
+ * is !NULL.
  */
 int walk_page_range(unsigned long addr, unsigned long end,
 		    struct mm_walk *walk)
@@ -165,18 +201,17 @@ int walk_page_range(unsigned long addr, unsigned long end,
 
 	pgd = pgd_offset(walk->mm, addr);
 	do {
-		struct vm_area_struct *uninitialized_var(vma);
+		struct vm_area_struct *vma;
 
 		next = pgd_addr_end(addr, end);
 
-#ifdef CONFIG_HUGETLB_PAGE
 		/*
 		 * handle hugetlb vma individually because pagetable walk for
 		 * the hugetlb page is dependent on the architecture and
 		 * we can't handled it in the same manner as non-huge pages.
 		 */
-		vma = find_vma(walk->mm, addr);
-		if (vma && is_vm_hugetlb_page(vma)) {
+		vma = hugetlb_vma(addr, walk);
+		if (vma) {
 			if (vma->vm_end < next)
 				next = vma->vm_end;
 			/*
@@ -189,7 +224,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
 			pgd = pgd_offset(walk->mm, next);
 			continue;
 		}
-#endif
+
 		if (pgd_none_or_clear_bad(pgd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
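
For readers of the new locking rule documented in the walk_page_range() comment above, here is a minimal, hypothetical caller sketch (not part of this commit): it registers a ->hugetlb_entry callback and therefore takes mmap_sem for read around the walk, which is exactly what the VM_BUG_ON in the new hugetlb_vma() helper enforces. The helper names count_huge_pte()/count_huge_range() are made up for illustration, and the hugetlb_entry prototype (pte, hmask, addr, next, walk) is an assumption for kernels of this era; check struct mm_walk in include/linux/mm.h for your tree.

/* Hypothetical example, not part of this patch: count non-empty hugetlb
 * PTEs in [start, end) of an mm. */
static int count_huge_pte(pte_t *pte, unsigned long hmask,
			  unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (!pte_none(*pte))
		(*count)++;
	return 0;	/* returning non-zero would abort the walk */
}

static unsigned long count_huge_range(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.hugetlb_entry	= count_huge_pte,
		.mm		= mm,
		.private	= &count,
	};

	/* mmap_sem must be held for at least read because
	 * ->hugetlb_entry is set (see the comment added above). */
	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);

	return count;
}

The design point of the patch is visible here: the #ifdef CONFIG_HUGETLB_PAGE block inside walk_page_range() is replaced by the hugetlb_vma() helper, which simply returns NULL when hugetlb support is not configured, so the main walk loop (and any caller like the sketch above) stays free of ifdefs.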