path: root/include
author    Hugh Dickins <hugh@veritas.com>    2005-10-29 21:16:30 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 00:40:41 -0400
commit    508034a32b819a2d40aa7ac0dbc8cd2e044c2de6 (patch)
tree      906a8f0095af24f403b30d649d3ec1ffb4ff2f50 /include
parent    8f4f8c164cb4af1432cc25eda82928ea4519ba72 (diff)
[PATCH] mm: unmap_vmas with inner ptlock
Remove the page_table_lock from around the calls to unmap_vmas, and replace the pte_offset_map in zap_pte_range by pte_offset_map_lock: all callers are now safe to descend without page_table_lock.

Don't attempt fancy locking for hugepages, just take page_table_lock in unmap_hugepage_range. Which makes zap_hugepage_range, and the hugetlb test in zap_page_range, redundant: unmap_vmas calls unmap_hugepage_range anyway. Nor does unmap_vmas have much use for its mm arg now.

The tlb_start_vma and tlb_end_vma in unmap_page_range are now called without page_table_lock: if they're implemented at all, they typically come down to flush_cache_range (usually done outside page_table_lock) and flush_tlb_range (which we already audited for the mprotect case).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
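For illustration only, a minimal sketch (not taken from this patch) of the inner-ptlock pattern the message describes: the PTE page's lock is taken with pte_offset_map_lock() inside the range walk, instead of the caller holding mm->page_table_lock around the whole unmap. The function name zap_pte_range_sketch and the loop body are simplified assumptions, not the kernel's actual zap_pte_range().

#include <linux/mm.h>

/*
 * Sketch of the inner-ptlock pattern: map the PTE page and take its
 * lock in one step, walk the range, then unmap and unlock.  The body
 * of the loop is elided; only the locking shape is shown.
 */
static void zap_pte_range_sketch(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* replaces a bare pte_offset_map() done under page_table_lock */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t ptent = *pte;

		if (pte_none(ptent))
			continue;
		/* ... clear the entry, free the page, update rss ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}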
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hugetlb.h | 2 --
-rw-r--r--  include/linux/mm.h      | 2 +-
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d664330d900e..0cea162b08c0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -16,7 +16,6 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
-void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
@@ -87,7 +86,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
-#define zap_hugepage_range(vma, start, len)	BUG()
 #define unmap_hugepage_range(vma, start, end)	BUG()
 #define is_hugepage_mem_enough(size)		0
 #define hugetlb_report_meminfo(buf)		0
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d4c3512e7db4..972e2ce8e07c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -682,7 +682,7 @@ struct zap_details {
 
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
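For context only, a hypothetical caller sketch under the new prototype above: unmap_vmas() no longer takes the mm argument, and (per the commit message) is no longer called under page_table_lock. The surrounding exit_mmap()-style teardown below is an assumption, abbreviated for illustration; it is not part of this diff.

/*
 * Hypothetical caller shape after this change: no mm argument to
 * unmap_vmas(), and no page_table_lock held around the call.
 */
static void example_full_teardown(struct mm_struct *mm)
{
	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;
	unsigned long end;

	lru_add_drain();
	flush_cache_mm(mm);
	tlb = tlb_gather_mmu(mm, 1);	/* full-mm flush */
	end = unmap_vmas(&tlb, mm->mmap, 0, -1, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);
	tlb_finish_mmu(tlb, 0, end);
}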