author    Hugh Dickins <hugh@veritas.com>        2005-10-29 21:16:30 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:41 -0400
commit    508034a32b819a2d40aa7ac0dbc8cd2e044c2de6 (patch)
tree      906a8f0095af24f403b30d649d3ec1ffb4ff2f50 /fs/hugetlbfs
parent    8f4f8c164cb4af1432cc25eda82928ea4519ba72 (diff)
[PATCH] mm: unmap_vmas with inner ptlock
Remove the page_table_lock from around the calls to unmap_vmas, and replace the pte_offset_map in zap_pte_range by pte_offset_map_lock: all callers are now safe to descend without page_table_lock.

Don't attempt fancy locking for hugepages, just take page_table_lock in unmap_hugepage_range.  Which makes zap_hugepage_range, and the hugetlb test in zap_page_range, redundant: unmap_vmas calls unmap_hugepage_range anyway.  Nor does unmap_vmas have much use for its mm arg now.

The tlb_start_vma and tlb_end_vma in unmap_page_range are now called without page_table_lock: if they're implemented at all, they typically come down to flush_cache_range (usually done outside page_table_lock) and flush_tlb_range (which we already audited for the mprotect case).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
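For reference, a minimal sketch of the pte_offset_map_lock idiom the patch moves to. This is a hypothetical walker, not the actual zap_pte_range body from mm/memory.c; it only assumes the usual pte-loop shape of the era:

	/*
	 * Hypothetical pte walker: pte_offset_map_lock() maps the pte
	 * page and takes the inner lock in one step, so the caller no
	 * longer needs to hold page_table_lock around the whole walk.
	 */
	static void sketch_zap_ptes(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		do {
			pte_t ptent = *pte;

			if (pte_none(ptent))
				continue;
			/* a real walker would clear the pte and free the page here */
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap_unlock(pte - 1, ptl);
	}

Because the lock now lives inside the walker, callers such as unmap_vmas need not hold page_table_lock across the whole range, which is what lets the hugetlbfs code below drop zap_hugepage_range.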
Diffstat (limited to 'fs/hugetlbfs')
-rw-r--r--  fs/hugetlbfs/inode.c  10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 3a9b6d179cbd..a826a8add5e3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -92,7 +92,7 @@ out:
 }
 
 /*
- * Called under down_write(mmap_sem), page_table_lock is not held
+ * Called under down_write(mmap_sem).
  */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -308,7 +308,6 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
 
 	vma_prio_tree_foreach(vma, &iter, root, h_pgoff, ULONG_MAX) {
 		unsigned long h_vm_pgoff;
-		unsigned long v_length;
 		unsigned long v_offset;
 
 		h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
@@ -319,11 +318,8 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
 		if (h_vm_pgoff >= h_pgoff)
 			v_offset = 0;
 
-		v_length = vma->vm_end - vma->vm_start;
-
-		zap_hugepage_range(vma,
-			vma->vm_start + v_offset,
-			v_length - v_offset);
+		unmap_hugepage_range(vma,
+				vma->vm_start + v_offset, vma->vm_end);
 	}
 }
 
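The callee side of this simplification lives in mm/hugetlb.c rather than in this file. A rough sketch of the shape the commit message describes, with helper names that follow the hugetlb conventions of the era but are illustrative rather than a copy of the real function:

	/*
	 * Sketch of the locking shape described above: the walker takes
	 * page_table_lock itself, so hugetlb_vmtruncate_list can call
	 * unmap_hugepage_range directly with no lock held.
	 */
	void unmap_hugepage_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;
		unsigned long address;
		pte_t *ptep;
		pte_t pte;

		spin_lock(&mm->page_table_lock);
		for (address = start; address < end; address += HPAGE_SIZE) {
			ptep = huge_pte_offset(mm, address);	/* find huge pte */
			if (!ptep)
				continue;
			pte = ptep_get_and_clear(mm, address, ptep);
			if (pte_none(pte))
				continue;
			put_page(pte_page(pte));		/* drop page ref */
		}
		spin_unlock(&mm->page_table_lock);
		flush_tlb_range(vma, start, end);
	}

With the lock taken here, the zap_hugepage_range wrapper and the hugetlb special case in zap_page_range have nothing left to do, which is why the hunk above can call unmap_hugepage_range directly with (start, end) bounds instead of a (start, length) pair.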