author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:23 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:40 -0400
commit	c74df32c724a1652ad8399b4891bb02c9d43743a (patch)
tree	5a79d56fdcf7dc2053a277dbf6db7c3b339e9659 /mm/mremap.c
parent	1bb3630e89cb8a7b3d3807629c20c5bad88290ff (diff)
[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock.  Remove the temporary bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not to hold page_table_lock, whether it's on init_mm or a user mm; take page_table_lock internally to check if a racing task already allocated.

Convert their callers from common code.  But avoid coming back to change them again later: instead of moving the spin_lock(&mm->page_table_lock) down, switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which encapsulate the mapping+locking and unlocking+unmapping together, and in the end may use alternatives to the mm page_table_lock itself.

These callers all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them; and pte_alloc uses the "atomic" pmd_present to test whether it needs to allocate.  It appears that on all arches we can safely descend without page_table_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
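The pairing macros named above can be sketched as follows; this is an illustrative approximation in kernel-style C, not necessarily the verbatim include/linux/mm.h definitions, and it assumes the lock behind the macros is still mm->page_table_lock at this stage (the message notes that alternatives may be used "in the end"):

	/* Map the pte and take the lock together (sketch; assumes the
	 * lock is still mm->page_table_lock). */
	#define pte_offset_map_lock(mm, pmd, address, ptlp)		\
	({								\
		spinlock_t *__ptl = &(mm)->page_table_lock;		\
		pte_t *__pte = pte_offset_map(pmd, address);		\
		*(ptlp) = __ptl;					\
		spin_lock(__ptl);					\
		__pte;							\
	})

	/* Undo both, in the matching reverse order. */
	#define pte_unmap_unlock(pte, ptl)	do {			\
		spin_unlock(ptl);					\
		pte_unmap(pte);						\
	} while (0)

	/* As above, but first allocate the page table if the "atomic"
	 * pmd_present test shows none is present yet. */
	#define pte_alloc_map_lock(mm, pmd, address, ptlp)		\
		((unlikely(!pmd_present(*(pmd))) &&			\
		  __pte_alloc(mm, pmd, address)) ?			\
			NULL : pte_offset_map_lock(mm, pmd, address, ptlp))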
Diffstat (limited to 'mm/mremap.c')
-rw-r--r--	mm/mremap.c	27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 616facc3d28a..8de77b632a20 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -28,9 +28,6 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 	pud_t *pud;
 	pmd_t *pmd;
 
-	/*
-	 * We don't need page_table_lock: we have mmap_sem exclusively.
-	 */
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none_or_clear_bad(pgd))
 		return NULL;
@@ -50,25 +47,20 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd = NULL;
+	pmd_t *pmd;
 
-	/*
-	 * We do need page_table_lock: because allocators expect that.
-	 */
-	spin_lock(&mm->page_table_lock);
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto out;
+		return NULL;
 
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto out;
+		return NULL;
 
 	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
-		pmd = NULL;
-out:
-	spin_unlock(&mm->page_table_lock);
+		return NULL;
+
 	return pmd;
 }
 
@@ -80,6 +72,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	struct address_space *mapping = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
+	spinlock_t *old_ptl;
 
 	if (vma->vm_file) {
 		/*
@@ -95,9 +88,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		new_vma->vm_truncate_count = 0;
 	}
 
-	spin_lock(&mm->page_table_lock);
-	old_pte = pte_offset_map(old_pmd, old_addr);
+	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
 	new_pte = pte_offset_map_nested(new_pmd, new_addr);
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
 				   new_pte++, new_addr += PAGE_SIZE) {
@@ -110,8 +102,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	}
 
 	pte_unmap_nested(new_pte - 1);
-	pte_unmap(old_pte - 1);
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
 }
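The move_ptes() conversion above shows the caller shape this commit moves everything toward. A minimal sketch of that pattern in kernel-style C; the function example_touch_pte() and its body are hypothetical illustration, not code from this commit, and assume a pmd already obtained by the usual pgd/pud/pmd descent:

	static int example_touch_pte(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte;

		/* Allocate the page table if missing, map it, and take
		 * the lock in one step: no bare mm->page_table_lock in
		 * the caller any more. */
		pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return -ENOMEM;

		/* ... examine or modify *pte while ptl is held ... */

		/* Unlock and unmap in the matching reverse order. */
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

Because the lock pointer is returned through ptlp rather than named directly, callers like this keep working unchanged when the macros later switch from mm->page_table_lock to an alternative (for example, a per-page-table lock).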