author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:30 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:41 -0400
commit	508034a32b819a2d40aa7ac0dbc8cd2e044c2de6
tree	906a8f0095af24f403b30d649d3ec1ffb4ff2f50	/mm/hugetlb.c
parent	8f4f8c164cb4af1432cc25eda82928ea4519ba72
[PATCH] mm: unmap_vmas with inner ptlock
Remove the page_table_lock from around the calls to unmap_vmas, and replace the pte_offset_map in zap_pte_range by pte_offset_map_lock: all callers are now safe to descend without page_table_lock.

Don't attempt fancy locking for hugepages, just take page_table_lock in unmap_hugepage_range. Which makes zap_hugepage_range, and the hugetlb test in zap_page_range, redundant: unmap_vmas calls unmap_hugepage_range anyway. Nor does unmap_vmas have much use for its mm arg now.

The tlb_start_vma and tlb_end_vma in unmap_page_range are now called without page_table_lock: if they're implemented at all, they typically come down to flush_cache_range (usually done outside page_table_lock) and flush_tlb_range (which we already audited for the mprotect case).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
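[Editor's note] The core of the zap_pte_range change described above is the switch from pte_offset_map, under a caller-held page_table_lock, to pte_offset_map_lock, which takes the per-table lock inside the walk itself. Below is a minimal sketch of that pattern, assuming the pte_offset_map_lock/pte_unmap_unlock API this patch series introduces; it is not the actual mm/memory.c code, which also handles rss accounting, swap entries, and the zap_details filter, and the function name is hypothetical.

	/* Hypothetical illustration of the inner-ptlock pattern, not mm/memory.c. */
	static void zap_pte_range_sketch(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		/*
		 * pte_offset_map_lock maps the pte page and takes its spinlock,
		 * so the caller no longer needs to hold mm->page_table_lock.
		 */
		pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

		do {
			pte_t ptent = *pte;
			if (pte_none(ptent))
				continue;
			/* ... clear the pte and release its page, as the real loop does ... */
		} while (pte++, addr += PAGE_SIZE, addr != end);

		pte_unmap_unlock(pte - 1, ptl);
	}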
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	12
1 files changed, 3 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea0826ff2663..f29b7dc02c39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -314,6 +314,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	BUG_ON(start & ~HPAGE_MASK);
 	BUG_ON(end & ~HPAGE_MASK);
 
+	spin_lock(&mm->page_table_lock);
+
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
@@ -333,17 +335,9 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		put_page(page);
 		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
-	flush_tlb_range(vma, start, end);
-}
 
-void zap_hugepage_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long length)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	spin_lock(&mm->page_table_lock);
-	unmap_hugepage_range(vma, start, start + length);
 	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(vma, start, end);
 }
 
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
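
[Editor's note] For orientation, here is how unmap_hugepage_range reads with both hunks applied. The huge-pte walk between the two hunks is not shown in this diff, so the loop body below is only a plausible reconstruction (huge_pte_offset and ptep_get_and_clear are the calls used in mm/hugetlb.c of this era, but treat the loop as a sketch, not the committed source); everything outside the loop comes directly from the hunks above.

	void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
				  unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;
		unsigned long address;
		pte_t *ptep;
		pte_t pte;
		struct page *page;

		BUG_ON(start & ~HPAGE_MASK);
		BUG_ON(end & ~HPAGE_MASK);

		/* The lock is now taken here, instead of by every caller. */
		spin_lock(&mm->page_table_lock);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		for (address = start; address < end; address += HPAGE_SIZE) {
			/* Reconstructed walk: this part is elided by the diff above. */
			ptep = huge_pte_offset(mm, address);
			if (!ptep)
				continue;
			pte = ptep_get_and_clear(mm, address, ptep);
			if (pte_none(pte))
				continue;
			page = pte_page(pte);
			put_page(page);
			add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
		}

		/* Drop the lock before flushing the TLB for the range. */
		spin_unlock(&mm->page_table_lock);
		flush_tlb_range(vma, start, end);
	}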