path: root/arch/arm26
author		Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:24 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:40 -0400
commit		b462705ac679f6195d1b23a752cda592d9107495 (patch)
tree		c4d9be08f67b0ffdc66c3e170614bd03945f3c42 /arch/arm26
parent		c74df32c724a1652ad8399b4891bb02c9d43743a (diff)
[PATCH] mm: arches skip ptlock
Convert those few architectures which are calling pud_alloc, pmd_alloc,
pte_alloc_map on a user mm, not to take the page_table_lock first, nor drop it
after.  Each of these can continue to use pte_alloc_map, no need to change over
to pte_alloc_map_lock, they're neither racy nor swappable.

In the sparc64 io_remap_pfn_range, flush_tlb_range then falls outside of the
page_table_lock: that's okay, on sparc64 it's like flush_tlb_mm, and that has
always been called from outside of page_table_lock in dup_mmap.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
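The pattern being changed can be sketched with the usual allocator chain.  The
sketch below is not taken from this patch and uses a hypothetical helper name;
it only illustrates that, after this series, these user-mm paths call the
allocators without taking mm->page_table_lock around them.

	/*
	 * Illustrative sketch only -- hypothetical helper, not verbatim
	 * kernel code.  Before this series, a path like this would wrap
	 * the calls below in spin_lock(&mm->page_table_lock) ...
	 * spin_unlock(&mm->page_table_lock); afterwards the allocators
	 * are called bare, since these paths are neither racy nor
	 * swappable.
	 */
	static pte_t *example_alloc_user_pte(struct mm_struct *mm, pgd_t *pgd,
					     unsigned long addr)
	{
		pud_t *pud;
		pmd_t *pmd;

		pud = pud_alloc(mm, pgd, addr);		/* no ptlock held */
		if (!pud)
			return NULL;
		pmd = pmd_alloc(mm, pud, addr);		/* no ptlock held */
		if (!pmd)
			return NULL;
		return pte_alloc_map(mm, pmd, addr);	/* maps the pte page */
	}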
Diffstat (limited to 'arch/arm26')
-rw-r--r--	arch/arm26/mm/memc.c	15
1 files changed, 0 insertions, 15 deletions
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index d6b008b8db76..34def6397c3c 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -79,12 +79,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		goto no_pgd;
 
 	/*
-	 * This lock is here just to satisfy pmd_alloc and pte_lock
-	 * FIXME: I bet we could avoid taking it pretty much altogether
-	 */
-	spin_lock(&mm->page_table_lock);
-
-	/*
 	 * On ARM, first page must always be allocated since it contains
 	 * the machine vectors.
 	 */
@@ -113,23 +107,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
 		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
 
-	spin_unlock(&mm->page_table_lock);
-
 	/* update MEMC tables */
 	cpu_memc_update_all(new_pgd);
 	return new_pgd;
 
 no_pte:
-	spin_unlock(&mm->page_table_lock);
 	pmd_free(new_pmd);
-	free_pgd_slow(new_pgd);
-	return NULL;
-
 no_pmd:
-	spin_unlock(&mm->page_table_lock);
 	free_pgd_slow(new_pgd);
-	return NULL;
-
 no_pgd:
 	return NULL;
 }
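For readability, the error exits of get_pgd_slow() after this hunk rely on
fall-through labels, each freeing one more level before returning NULL
(reconstructed from the unchanged lines of the hunk above):

	no_pte:
		pmd_free(new_pmd);
	no_pmd:
		free_pgd_slow(new_pgd);
	no_pgd:
		return NULL;
	}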