author     Hugh Dickins <hugh@veritas.com>          2005-10-29 21:16:24 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 00:40:40 -0400
commit     b462705ac679f6195d1b23a752cda592d9107495
tree       c4d9be08f67b0ffdc66c3e170614bd03945f3c42 /arch
parent     c74df32c724a1652ad8399b4891bb02c9d43743a
[PATCH] mm: arches skip ptlock
Convert those few architectures which are calling pud_alloc, pmd_alloc,
pte_alloc_map on a user mm, not to take the page_table_lock first, nor drop
it after.  Each of these can continue to use pte_alloc_map, no need to change
over to pte_alloc_map_lock: they're neither racy nor swappable.

In the sparc64 io_remap_pfn_range, flush_tlb_range then falls outside of the
page_table_lock: that's okay, on sparc64 it's like flush_tlb_mm, and that has
always been called from outside of page_table_lock in dup_mmap.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
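For reference, the calling pattern after this change looks like the sketch
below.  The helper establish_stub_mapping is hypothetical, not taken from this
patch or from any of the touched files; it only illustrates, using the
page-table API these architectures already use, that pud_alloc, pmd_alloc and
pte_alloc_map are now called on a user mm with no spin_lock(&mm->page_table_lock)
around them, the allocators handling their own serialization.

    /* Hypothetical helper, for illustration only -- not part of this patch.
     * Populates a single user PTE without taking mm->page_table_lock. */
    static int establish_stub_mapping(struct mm_struct *mm, unsigned long addr,
                                      pte_t entry)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;

            pud = pud_alloc(mm, pgd, addr);      /* no page_table_lock held */
            if (!pud)
                    return -ENOMEM;
            pmd = pmd_alloc(mm, pud, addr);
            if (!pmd)
                    return -ENOMEM;
            pte = pte_alloc_map(mm, pmd, addr);  /* fine: neither racy nor swappable */
            if (!pte)
                    return -ENOMEM;
            set_pte(pte, entry);
            pte_unmap(pte);
            return 0;
    }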
Diffstat (limited to 'arch')
-rw-r--r--    arch/arm/mm/mm-armv.c        14
-rw-r--r--    arch/arm26/mm/memc.c         15
-rw-r--r--    arch/sparc/mm/generic.c       4
-rw-r--r--    arch/sparc64/mm/generic.c     6
-rw-r--r--    arch/um/kernel/skas/mmu.c     3
5 files changed, 3 insertions, 39 deletions
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 61bc2fa0511e..60f3e039bac2 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -180,11 +180,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
         if (!vectors_high()) {
                 /*
-                 * This lock is here just to satisfy pmd_alloc and pte_lock
-                 */
-                spin_lock(&mm->page_table_lock);
-
-                /*
                  * On ARM, first page must always be allocated since it
                  * contains the machine vectors.
                  */
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                 set_pte(new_pte, *init_pte);
                 pte_unmap_nested(init_pte);
                 pte_unmap(new_pte);
-
-                spin_unlock(&mm->page_table_lock);
         }
 
         return new_pgd;
 
 no_pte:
-        spin_unlock(&mm->page_table_lock);
         pmd_free(new_pmd);
-        free_pages((unsigned long)new_pgd, 2);
-        return NULL;
-
 no_pmd:
-        spin_unlock(&mm->page_table_lock);
         free_pages((unsigned long)new_pgd, 2);
-        return NULL;
-
 no_pgd:
         return NULL;
 }
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index d6b008b8db76..34def6397c3c 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -79,12 +79,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                 goto no_pgd;
 
         /*
-         * This lock is here just to satisfy pmd_alloc and pte_lock
-         * FIXME: I bet we could avoid taking it pretty much altogether
-         */
-        spin_lock(&mm->page_table_lock);
-
-        /*
          * On ARM, first page must always be allocated since it contains
          * the machine vectors.
          */
@@ -113,23 +107,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
         memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
                (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
 
-        spin_unlock(&mm->page_table_lock);
-
         /* update MEMC tables */
         cpu_memc_update_all(new_pgd);
         return new_pgd;
 
 no_pte:
-        spin_unlock(&mm->page_table_lock);
         pmd_free(new_pmd);
-        free_pgd_slow(new_pgd);
-        return NULL;
-
 no_pmd:
-        spin_unlock(&mm->page_table_lock);
         free_pgd_slow(new_pgd);
-        return NULL;
-
 no_pgd:
         return NULL;
 }
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 659c9a71f867..9604893ffdbd 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -81,9 +81,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
         dir = pgd_offset(mm, from);
         flush_cache_range(vma, beg, end);
 
-        spin_lock(&mm->page_table_lock);
         while (from < end) {
-                pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+                pmd_t *pmd = pmd_alloc(mm, dir, from);
                 error = -ENOMEM;
                 if (!pmd)
                         break;
@@ -93,7 +92,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         }
-        spin_unlock(&mm->page_table_lock);
 
         flush_tlb_range(vma, beg, end);
         return error;
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index afc01cec701f..112c316e7cd2 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -135,9 +135,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
         dir = pgd_offset(mm, from);
         flush_cache_range(vma, beg, end);
 
-        spin_lock(&mm->page_table_lock);
         while (from < end) {
-                pud_t *pud = pud_alloc(current->mm, dir, from);
+                pud_t *pud = pud_alloc(mm, dir, from);
                 error = -ENOMEM;
                 if (!pud)
                         break;
@@ -147,8 +146,7 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         }
-        flush_tlb_range(vma, beg, end);
-        spin_unlock(&mm->page_table_lock);
 
+        flush_tlb_range(vma, beg, end);
         return error;
 }
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 240143b616a2..02cf36e0331a 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -28,7 +28,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
         pmd_t *pmd;
         pte_t *pte;
 
-        spin_lock(&mm->page_table_lock);
         pgd = pgd_offset(mm, proc);
         pud = pud_alloc(mm, pgd, proc);
         if (!pud)
@@ -63,7 +62,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
         *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
         *pte = pte_mkexec(*pte);
         *pte = pte_wrprotect(*pte);
-        spin_unlock(&mm->page_table_lock);
         return(0);
 
 out_pmd:
@@ -71,7 +69,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 out_pte:
         pmd_free(pmd);
 out:
-        spin_unlock(&mm->page_table_lock);
         return(-ENOMEM);
 }
 