author     Hugh Dickins <hugh@veritas.com>          2005-10-29 21:16:27 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 00:40:40 -0400
commit     705e87c0c3c38424f7f30556c85bc20e808d2f59 (patch)
tree       7a237e6266f4801385e1226cc497b47e3a2458bd /fs
parent     8f4e2101fd7df9031a754eedb82e2060b51f8c45 (diff)
[PATCH] mm: pte_offset_map_lock loops
Convert those common loops using page_table_lock on the outside and pte_offset_map within to use just pte_offset_map_lock within instead.

These all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them. But whereas pte_alloc loops tested with the "atomic" pmd_present, these loops are testing with pmd_none, which on i386 PAE tests both lower and upper halves.

That's now unsafe, so add a cast into pmd_none to test only the vital lower half: we lose a little sensitivity to a corrupt middle directory, but not enough to worry about. It appears that i386 and UML were the only architectures vulnerable in this way; pgd and pud are not a problem.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
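The arch-side cast described above lands outside the fs/ diffstat shown below. As a minimal sketch only, assuming the i386 PAE layout where pmd_val() yields a 64-bit value and the present bit lives in the low word (the exact definition in the tree may differ):

/*
 * Illustrative sketch, not the literal hunk from this patch: with PAE,
 * !pmd_val(x) reads both 32-bit halves and can race against a
 * concurrent set_pmd(); casting to unsigned long tests only the lower
 * half, which carries the present bit, trading away a little
 * sensitivity to a corrupt middle directory.
 */
#define pmd_none(x)        (!(unsigned long)pmd_val(x))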
Diffstat (limited to 'fs')
-rw-r--r--  fs/proc/task_mmu.c  17
1 file changed, 6 insertions, 11 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7c89b4549049..7e5e7ec2e36d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -203,13 +203,14 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 struct mem_size_stats *mss)
 {
         pte_t *pte, ptent;
+        spinlock_t *ptl;
         unsigned long pfn;
         struct page *page;
 
-        pte = pte_offset_map(pmd, addr);
+        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
         do {
                 ptent = *pte;
-                if (pte_none(ptent) || !pte_present(ptent))
+                if (!pte_present(ptent))
                         continue;
 
                 mss->resident += PAGE_SIZE;
@@ -230,8 +231,8 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                         mss->private_clean += PAGE_SIZE;
                 }
         } while (pte++, addr += PAGE_SIZE, addr != end);
-        pte_unmap(pte - 1);
-        cond_resched_lock(&vma->vm_mm->page_table_lock);
+        pte_unmap_unlock(pte - 1, ptl);
+        cond_resched();
 }
 
 static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -285,17 +286,11 @@ static inline void smaps_pgd_range(struct vm_area_struct *vma,
 static int show_smap(struct seq_file *m, void *v)
 {
         struct vm_area_struct *vma = v;
-        struct mm_struct *mm = vma->vm_mm;
         struct mem_size_stats mss;
 
         memset(&mss, 0, sizeof mss);
-
-        if (mm) {
-                spin_lock(&mm->page_table_lock);
+        if (vma->vm_mm)
                 smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
-                spin_unlock(&mm->page_table_lock);
-        }
-
         return show_map_internal(m, v, &mss);
 }
 
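Distilled from the hunks above, the general shape of the conversion looks like the sketch below; walk_one_pmd() and its parameters are illustrative names, not code from this patch.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Before: callers held mm->page_table_lock around the walk and paired
 * pte_offset_map() with pte_unmap().  After: the walk takes the page
 * table lock itself through pte_offset_map_lock(), releases it with
 * pte_unmap_unlock(), and may call plain cond_resched() afterwards
 * because no spinlock is held at that point.
 */
static void walk_one_pmd(struct mm_struct *mm, pmd_t *pmd,
                         unsigned long addr, unsigned long end)
{
        spinlock_t *ptl;
        pte_t *pte;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                pte_t ptent = *pte;

                if (!pte_present(ptent))
                        continue;
                /* inspect or accumulate statistics on the mapping here */
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
}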