author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:21 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:40 -0400
commit	872fec16d9a0ed3b75b8893aa217e49cca575ee5
tree	1dfc8b9f2754bdfff645188e497865c00201d535
parent	46dea3d092d23a58b42499cc8a21de0fad079f4a
[PATCH] mm: init_mm without ptlock
First step in pushing down the page_table_lock.  init_mm.page_table_lock has
been used throughout the architectures (usually for ioremap): not to serialize
kernel address space allocation (that's usually vmlist_lock), but because
pud_alloc, pmd_alloc and pte_alloc_kernel expect the caller to hold it.
Reverse that: don't lock or unlock init_mm.page_table_lock in any of the
architectures; instead rely on pud_alloc, pmd_alloc and pte_alloc_kernel to
take and drop it when allocating a new page table, checking under the lock
whether a racing task has already installed one.  Similarly no
page_table_lock in vmalloc's map_vm_area.
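The resulting allocation pattern, condensed here as a sketch from the
mm/memory.c hunk below, allocates outside the lock and takes it only to
publish the new page table:

	pte_t fastcall *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
	{
		if (!pmd_present(*pmd)) {
			/* allocation may sleep, so do it unlocked */
			pte_t *new = pte_alloc_one_kernel(&init_mm, address);
			if (!new)
				return NULL;

			/* lock only to publish, rechecking for a racer */
			spin_lock(&init_mm.page_table_lock);
			if (pmd_present(*pmd))
				pte_free_kernel(new);	/* racer got there first */
			else
				pmd_populate_kernel(&init_mm, pmd, new);
			spin_unlock(&init_mm.page_table_lock);
		}
		return pte_offset_kernel(pmd, address);
	}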
Some temporary ugliness in __pud_alloc and __pmd_alloc: since they also handle
user mms, which are converted only by a later patch, for now they have to lock
differently according to whether or not it's init_mm.
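Condensed from the __pud_alloc hunk in mm/memory.c below (__pmd_alloc has the
identical shape), the bridging looks like this: user mms still enter with the
lock held, while init_mm now enters unlocked:

	if (mm != &init_mm)		/* Temporary bridging hack */
		spin_unlock(&mm->page_table_lock);	/* user mm: caller held it */
	new = pud_alloc_one(mm, address);		/* may sleep */
	if (!new) {
		if (mm != &init_mm)	/* Temporary bridging hack */
			spin_lock(&mm->page_table_lock);	/* restore caller's lock */
		return NULL;
	}

	spin_lock(&mm->page_table_lock);	/* retake (user mm) or take (init_mm) */
	if (pgd_present(*pgd)) {
		pud_free(new);			/* racer populated it meanwhile */
		goto out;
	}
	pgd_populate(mm, pgd, new);
out:
	if (mm == &init_mm)		/* Temporary bridging hack */
		spin_unlock(&mm->page_table_lock);	/* init_mm caller never held it */
	return pud_offset(pgd, address);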
If sources get muddled, there's a danger that an arch source taking
init_mm.page_table_lock will be mixed with common source also taking it (or
with neither taking it).  So break the rules and make another change, which
should break the build for such a mismatch: remove the redundant mm arg from
pte_alloc_kernel (ppc64 scrapped its distinct ioremap_mm in 2.6.13).
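At a typical call site the conversion is mechanical, and any stale caller now
fails to compile; the ioremap pattern repeated across the architecture diffs
below goes from:

	/* before: caller serializes and passes the mm */
	spin_lock(&init_mm.page_table_lock);
	pte = pte_alloc_kernel(&init_mm, pmd, address);
	...
	spin_unlock(&init_mm.page_table_lock);

to:

	/* after: no mm argument, no locking at the call site */
	pte = pte_alloc_kernel(pmd, address);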
Exceptions: arm26 used pte_alloc_kernel on user mm, now pte_alloc_map; ia64
used pte_alloc_map on init_mm, now pte_alloc_kernel; parisc had bad args to
pmd_alloc and pte_alloc_kernel in unused USE_HPPA_IOREMAP code; ppc64
map_io_page forgot to unlock on failure; ppc mmu_mapin_ram and ppc64 im_free
took page_table_lock for no good reason.
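The map_io_page leak is typical of why caller-side locking is fragile: every
early return inside the locked region must remember to unlock, as in this
condensed view of the old ppc64 code (see its hunk below):

	spin_lock(&init_mm.page_table_lock);
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;		/* bug: returns with the lock held */
	...
	spin_unlock(&init_mm.page_table_lock);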
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/alpha/mm/remap.c          |   6
-rw-r--r--  arch/arm/mm/consistent.c       |   6
-rw-r--r--  arch/arm/mm/ioremap.c          |   4
-rw-r--r--  arch/arm26/mm/memc.c           |   3
-rw-r--r--  arch/cris/mm/ioremap.c         |   4
-rw-r--r--  arch/frv/mm/dma-alloc.c        |   5
-rw-r--r--  arch/i386/mm/ioremap.c         |   4
-rw-r--r--  arch/ia64/mm/init.c            |  11
-rw-r--r--  arch/m32r/mm/ioremap.c         |   4
-rw-r--r--  arch/m68k/mm/kmap.c            |   2
-rw-r--r--  arch/m68k/sun3x/dvma.c         |   2
-rw-r--r--  arch/mips/mm/ioremap.c         |   4
-rw-r--r--  arch/parisc/kernel/pci-dma.c   |   2
-rw-r--r--  arch/parisc/mm/ioremap.c       |   6
-rw-r--r--  arch/ppc/kernel/dma-mapping.c  |   6
-rw-r--r--  arch/ppc/mm/4xx_mmu.c          |   4
-rw-r--r--  arch/ppc/mm/pgtable.c          |   4
-rw-r--r--  arch/ppc64/mm/imalloc.c        |   5
-rw-r--r--  arch/ppc64/mm/init.c           |   4
-rw-r--r--  arch/s390/mm/ioremap.c         |   4
-rw-r--r--  arch/sh/mm/ioremap.c           |   4
-rw-r--r--  arch/sh64/mm/ioremap.c         |   4
-rw-r--r--  arch/x86_64/mm/ioremap.c       |   4
-rw-r--r--  include/linux/mm.h             |   2
-rw-r--r--  mm/memory.c                    |  60
-rw-r--r--  mm/vmalloc.c                   |   4
26 files changed, 54 insertions(+), 114 deletions(-)
diff --git a/arch/alpha/mm/remap.c b/arch/alpha/mm/remap.c
index 19817ad3d89b..a78356c3ead5 100644
--- a/arch/alpha/mm/remap.c
+++ b/arch/alpha/mm/remap.c
@@ -2,7 +2,6 @@
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 
-/* called with the page_table_lock held */
 static inline void
 remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 	unsigned long phys_addr, unsigned long flags)
@@ -31,7 +30,6 @@ remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 	} while (address && (address < end));
 }
 
-/* called with the page_table_lock held */
 static inline int
 remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 	unsigned long phys_addr, unsigned long flags)
@@ -46,7 +44,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address,
@@ -70,7 +68,6 @@ __alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd;
 		pmd = pmd_alloc(&init_mm, dir, address);
@@ -84,7 +81,6 @@ __alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	return error;
 }
 
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 82f4d5e27c54..47b0b767f080 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -397,8 +397,6 @@ static int __init consistent_init(void)
 	pte_t *pte;
 	int ret = 0;
 
-	spin_lock(&init_mm.page_table_lock);
-
 	do {
 		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
 		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
@@ -409,7 +407,7 @@ static int __init consistent_init(void)
 		}
 		WARN_ON(!pmd_none(*pmd));
 
-		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
 		if (!pte) {
 			printk(KERN_ERR "%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
@@ -419,8 +417,6 @@ static int __init consistent_init(void)
 		consistent_pte = pte;
 	} while (0);
 
-	spin_unlock(&init_mm.page_table_lock);
-
 	return ret;
 }
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6fb1258df1b5..0f128c28fee4 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -75,7 +75,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 
 	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
@@ -97,7 +97,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
 	phys_addr -= address;
 	dir = pgd_offset(&init_mm, address);
 	BUG_ON(address >= end);
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
 		if (!pmd) {
@@ -114,7 +113,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
 		dir++;
 	} while (address && (address < end));
 
-	spin_unlock(&init_mm.page_table_lock);
 	flush_cache_vmap(start, end);
 	return err;
 }
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index 8e8a2bb2487d..d6b008b8db76 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -92,7 +92,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pmd)
 		goto no_pmd;
 
-	new_pte = pte_alloc_kernel(mm, new_pmd, 0);
+	new_pte = pte_alloc_map(mm, new_pmd, 0);
 	if (!new_pte)
 		goto no_pte;
 
@@ -101,6 +101,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	init_pte = pte_offset(init_pmd, 0);
 
 	set_pte(new_pte, *init_pte);
+	pte_unmap(new_pte);
 
 	/*
 	 * the page table entries are zeroed
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index ebba11e270fa..a92ac9877582 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -52,7 +52,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, prot);
@@ -74,7 +74,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pud_t *pud;
 		pmd_t *pmd;
@@ -94,7 +93,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index cfc4f97490c6..342823aad758 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -55,21 +55,18 @@ static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
 	pte_t *pte;
 	int err = -ENOMEM;
 
-	spin_lock(&init_mm.page_table_lock);
-
 	/* Use upper 10 bits of VA to index the first level map */
 	pge = pgd_offset_k(va);
 	pue = pud_offset(pge, va);
 	pme = pmd_offset(pue, va);
 
 	/* Use middle 10 bits of VA to index the second-level map */
-	pte = pte_alloc_kernel(&init_mm, pme, va);
+	pte = pte_alloc_kernel(pme, va);
 	if (pte != 0) {
 		err = 0;
 		set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
 	}
 
-	spin_unlock(&init_mm.page_table_lock);
 	return err;
 }
 
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index f379b8d67558..5d09de8d1c6b 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -28,7 +28,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
 	unsigned long pfn;
 
 	pfn = phys_addr >> PAGE_SHIFT;
-	pte = pte_alloc_kernel(&init_mm, pmd, addr);
+	pte = pte_alloc_kernel(pmd, addr);
 	if (!pte)
 		return -ENOMEM;
 	do {
@@ -87,14 +87,12 @@ static int ioremap_page_range(unsigned long addr,
 	flush_cache_all();
 	phys_addr -= addr;
 	pgd = pgd_offset_k(addr);
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return err;
 }
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 0063b2c50908..e3215ba64ffd 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -275,26 +275,21 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 
 	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */
 
-	spin_lock(&init_mm.page_table_lock);
 	{
 		pud = pud_alloc(&init_mm, pgd, address);
 		if (!pud)
 			goto out;
-
 		pmd = pmd_alloc(&init_mm, pud, address);
 		if (!pmd)
 			goto out;
-		pte = pte_alloc_map(&init_mm, pmd, address);
+		pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			goto out;
-		if (!pte_none(*pte)) {
-			pte_unmap(pte);
+		if (!pte_none(*pte))
 			goto out;
-		}
 		set_pte(pte, mk_pte(page, pgprot));
-		pte_unmap(pte);
 	}
-  out:	spin_unlock(&init_mm.page_table_lock);
+  out:
 	/* no need for flush_tlb */
 	return page;
 }
diff --git a/arch/m32r/mm/ioremap.c b/arch/m32r/mm/ioremap.c
index 70c59055c19c..a151849a605e 100644
--- a/arch/m32r/mm/ioremap.c
+++ b/arch/m32r/mm/ioremap.c
@@ -67,7 +67,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -90,7 +90,6 @@ remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd;
 		pmd = pmd_alloc(&init_mm, dir, address);
@@ -104,7 +103,6 @@ remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 5dcb3fa35ea9..fe2383e36b06 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -201,7 +201,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
 			virtaddr += PTRTREESIZE;
 			size -= PTRTREESIZE;
 		} else {
-			pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
+			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
 			if (!pte_dir) {
 				printk("ioremap: no mem for pte_dir\n");
 				return NULL;
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index 32e55adfeb8e..117481e86305 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -116,7 +116,7 @@ inline int dvma_map_cpu(unsigned long kaddr,
 			pte_t *pte;
 			unsigned long end3;
 
-			if((pte = pte_alloc_kernel(&init_mm, pmd, vaddr)) == NULL) {
+			if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
 				ret = -ENOMEM;
 				goto out;
 			}
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 9c44ca70befa..3101d1db5592 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -55,7 +55,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -77,7 +77,6 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pud_t *pud;
 		pmd_t *pmd;
@@ -96,7 +95,6 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index ae6213d71670..f94a02ef3d95 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -114,7 +114,7 @@ static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
+		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
 		if (!pte)
 			return -ENOMEM;
 		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index f2df502cdae3..5c7a1b3b9326 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -52,7 +52,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(NULL, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -75,10 +75,9 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd;
-		pmd = pmd_alloc(dir, address);
+		pmd = pmd_alloc(&init_mm, dir, address);
 		error = -ENOMEM;
 		if (!pmd)
 			break;
@@ -89,7 +88,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
index 0f710d2baec6..685fd0defe23 100644
--- a/arch/ppc/kernel/dma-mapping.c
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -335,8 +335,6 @@ static int __init dma_alloc_init(void)
 	pte_t *pte;
 	int ret = 0;
 
-	spin_lock(&init_mm.page_table_lock);
-
 	do {
 		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
 		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
@@ -347,7 +345,7 @@ static int __init dma_alloc_init(void)
 		}
 		WARN_ON(!pmd_none(*pmd));
 
-		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
 		if (!pte) {
 			printk(KERN_ERR "%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
@@ -357,8 +355,6 @@ static int __init dma_alloc_init(void)
 		consistent_pte = pte;
 	} while (0);
 
-	spin_unlock(&init_mm.page_table_lock);
-
 	return ret;
 }
 
diff --git a/arch/ppc/mm/4xx_mmu.c b/arch/ppc/mm/4xx_mmu.c
index b7bcbc232f39..4d006aa1a0d1 100644
--- a/arch/ppc/mm/4xx_mmu.c
+++ b/arch/ppc/mm/4xx_mmu.c
@@ -110,13 +110,11 @@ unsigned long __init mmu_mapin_ram(void)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-		spin_lock(&init_mm.page_table_lock);
 		pmdp = pmd_offset(pgd_offset_k(v), v);
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
-		spin_unlock(&init_mm.page_table_lock);
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
@@ -127,10 +125,8 @@ unsigned long __init mmu_mapin_ram(void)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-		spin_lock(&init_mm.page_table_lock);
 		pmdp = pmd_offset(pgd_offset_k(v), v);
 		pmd_val(*pmdp) = val;
-		spin_unlock(&init_mm.page_table_lock);
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index 43505b1fc5d8..6ea9185fd120 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -280,18 +280,16 @@ map_page(unsigned long va, phys_addr_t pa, int flags)
 	pte_t *pg;
 	int err = -ENOMEM;
 
-	spin_lock(&init_mm.page_table_lock);
 	/* Use upper 10 bits of VA to index the first level map */
 	pd = pmd_offset(pgd_offset_k(va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
-	pg = pte_alloc_kernel(&init_mm, pd, va);
+	pg = pte_alloc_kernel(pd, va);
 	if (pg != 0) {
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
 		if (mem_init_done)
 			flush_HPTE(0, va, pmd_val(*pd));
 	}
-	spin_unlock(&init_mm.page_table_lock);
 	return err;
 }
 
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index c65b87b92756..f4ca29cf5364 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -300,12 +300,7 @@ void im_free(void * addr)
 	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
 			*p = tmp->next;
-
-			/* XXX: do we need the lock? */
-			spin_lock(&init_mm.page_table_lock);
 			unmap_vm_area(tmp);
-			spin_unlock(&init_mm.page_table_lock);
-
 			kfree(tmp);
 			up(&imlist_sem);
 			return;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index be64b157afce..a45584b3440c 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -155,7 +155,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	unsigned long vsid;
 
 	if (mem_init_done) {
-		spin_lock(&init_mm.page_table_lock);
 		pgdp = pgd_offset_k(ea);
 		pudp = pud_alloc(&init_mm, pgdp, ea);
 		if (!pudp)
@@ -163,12 +162,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 		pmdp = pmd_alloc(&init_mm, pudp, ea);
 		if (!pmdp)
 			return -ENOMEM;
-		ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
+		ptep = pte_alloc_kernel(pmdp, ea);
 		if (!ptep)
 			return -ENOMEM;
 		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 							__pgprot(flags)));
-		spin_unlock(&init_mm.page_table_lock);
 	} else {
 		unsigned long va, vpn, hash, hpteg;
 
diff --git a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c
index c6c39d868bc8..0f6e9ecbefe2 100644
--- a/arch/s390/mm/ioremap.c
+++ b/arch/s390/mm/ioremap.c
@@ -58,7 +58,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -80,7 +80,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd;
 		pmd = pmd_alloc(&init_mm, dir, address);
@@ -94,7 +93,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return 0;
 }
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 9f490c2742f0..e794e27a72f1 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -57,7 +57,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -79,7 +79,6 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd;
 		pmd = pmd_alloc(&init_mm, dir, address);
@@ -93,7 +92,6 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
index f4003da556bc..fb1866fa2c9d 100644
--- a/arch/sh64/mm/ioremap.c
+++ b/arch/sh64/mm/ioremap.c
@@ -79,7 +79,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 		BUG();
 
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -101,7 +101,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
 		error = -ENOMEM;
@@ -115,7 +114,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return 0;
 }
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 6972df480d2b..ecf7acb5db9b 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -60,7 +60,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -105,7 +105,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pud_t *pud;
 		pud = pud_alloc(&init_mm, pgd, address);
@@ -119,7 +118,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		pgd++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 89398032bc4b..b9fa82b96d9e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -706,7 +706,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
 extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc_kernel(pmd_t *pmd, unsigned long address));
 extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
diff --git a/mm/memory.c b/mm/memory.c
index 692ad810263d..95a4553c75f7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -307,28 +307,22 @@ out:
 	return pte_offset_map(pmd, address);
 }
 
-pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		pte_t *new;
 
-		spin_unlock(&mm->page_table_lock);
-		new = pte_alloc_one_kernel(mm, address);
-		spin_lock(&mm->page_table_lock);
+		new = pte_alloc_one_kernel(&init_mm, address);
 		if (!new)
 			return NULL;
 
-		/*
-		 * Because we dropped the lock, we should re-check the
-		 * entry, as somebody else could have populated it..
-		 */
-		if (pmd_present(*pmd)) {
+		spin_lock(&init_mm.page_table_lock);
+		if (pmd_present(*pmd))
 			pte_free_kernel(new);
-			goto out;
-		}
-		pmd_populate_kernel(mm, pmd, new);
+		else
+			pmd_populate_kernel(&init_mm, pmd, new);
+		spin_unlock(&init_mm.page_table_lock);
 	}
-out:
 	return pte_offset_kernel(pmd, address);
 }
 
@@ -2097,30 +2091,30 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
+ * We've already handled the fast-path in-line.
  */
 pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
 	pud_t *new;
 
-	spin_unlock(&mm->page_table_lock);
+	if (mm != &init_mm)		/* Temporary bridging hack */
+		spin_unlock(&mm->page_table_lock);
 	new = pud_alloc_one(mm, address);
-	spin_lock(&mm->page_table_lock);
-	if (!new)
+	if (!new) {
+		if (mm != &init_mm)	/* Temporary bridging hack */
+			spin_lock(&mm->page_table_lock);
 		return NULL;
+	}
 
-	/*
-	 * Because we dropped the lock, we should re-check the
-	 * entry, as somebody else could have populated it..
-	 */
+	spin_lock(&mm->page_table_lock);
 	if (pgd_present(*pgd)) {
 		pud_free(new);
 		goto out;
 	}
 	pgd_populate(mm, pgd, new);
 out:
+	if (mm == &init_mm)		/* Temporary bridging hack */
+		spin_unlock(&mm->page_table_lock);
 	return pud_offset(pgd, address);
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
@@ -2128,24 +2122,22 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
 #ifndef __PAGETABLE_PMD_FOLDED
 /*
  * Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
+ * We've already handled the fast-path in-line.
  */
 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
 	pmd_t *new;
 
-	spin_unlock(&mm->page_table_lock);
+	if (mm != &init_mm)		/* Temporary bridging hack */
+		spin_unlock(&mm->page_table_lock);
 	new = pmd_alloc_one(mm, address);
-	spin_lock(&mm->page_table_lock);
-	if (!new)
+	if (!new) {
+		if (mm != &init_mm)	/* Temporary bridging hack */
+			spin_lock(&mm->page_table_lock);
 		return NULL;
+	}
 
-	/*
-	 * Because we dropped the lock, we should re-check the
-	 * entry, as somebody else could have populated it..
-	 */
+	spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
 	if (pud_present(*pud)) {
 		pmd_free(new);
@@ -2161,6 +2153,8 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
 #endif /* __ARCH_HAS_4LEVEL_HACK */
 
 out:
+	if (mm == &init_mm)		/* Temporary bridging hack */
+		spin_unlock(&mm->page_table_lock);
 	return pmd_offset(pud, address);
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5e9120598799..54a90e83cb31 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -89,7 +89,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 {
 	pte_t *pte;
 
-	pte = pte_alloc_kernel(&init_mm, pmd, addr);
+	pte = pte_alloc_kernel(pmd, addr);
 	if (!pte)
 		return -ENOMEM;
 	do {
@@ -147,14 +147,12 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		err = vmap_pud_range(pgd, addr, next, prot, pages);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	spin_unlock(&init_mm.page_table_lock);
 	flush_cache_vmap((unsigned long) area->addr, end);
 	return err;
 }