author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-26 17:21:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 19:52:08 -0400
commit		3e4d3af501cccdc8a8cca41bdbe57d54ad7e7e73 (patch)
tree		2ce507f7ec7275563653e52f18606aba4f99b7f1 /arch
parent		61ecdb801ef2cd28e32442383106d7837d76deac (diff)
mm: stack based kmap_atomic()
Keep the current interface but ignore the KM_type and use a stack based
approach.
The advantage is that we get rid of crappy code like:
  #define __KM_PTE			\
	(in_nmi() ? KM_NMI_PTE :	\
	 in_irq() ? KM_IRQ_PTE :	\
	 KM_PTE0)
and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.
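The slot index now comes off a small per-CPU stack maintained by the
generic highmem code (this view is filtered to arch/, so that part of
the patch is not shown below).  A rough sketch of the push/pop helpers,
approximating the include/linux/highmem.h side of this change rather
than quoting it verbatim:

  DECLARE_PER_CPU(int, __kmap_atomic_idx);

  static inline int kmap_atomic_idx_push(void)
  {
	int idx = __get_cpu_var(__kmap_atomic_idx)++;

	/* debug builds can still catch stack overflow */
	BUG_ON(idx > KM_TYPE_NR);
	return idx;
  }

  static inline int kmap_atomic_idx_pop(void)
  {
	int idx = --__get_cpu_var(__kmap_atomic_idx);

	/* ... and underflow */
	BUG_ON(idx < 0);
	return idx;
  }

Each kmap_atomic() pushes, each kunmap_atomic() pops, so the fixmap
slot to use is always the top of the current CPU's stack.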
The downside is that FRV kmap_atomic() gets more expensive.
For now we use a CPP trick suggested by Andrew:
  #define kmap_atomic(page, args...) __kmap_atomic(page)
to avoid having to touch all kmap_atomic() users in a single patch.
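That way an unconverted caller such as

  kaddr = kmap_atomic(page, KM_USER0);

still compiles: the preprocessor simply discards the extra argument and
the call becomes __kmap_atomic(page).  (KM_USER0 here is just one of
the old slot names, picked for illustration.)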
[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem to begin with ]
[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
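[ because the slot index now comes off a per-CPU stack, nested atomic
  kmaps must be released in strict LIFO order; the parent commit
  61ecdb801ef2 ("mm: strictly nested kmap_atomic()") already reordered
  the callers that didn't.  A sketch of the required discipline, with
  hypothetical page variables:

    void *src = kmap_atomic(src_page);	/* pushes slot 0 */
    void *dst = kmap_atomic(dst_page);	/* pushes slot 1 */

    copy_page(dst, src);

    kunmap_atomic(dst);			/* pop slot 1 first... */
    kunmap_atomic(src);			/* ...then slot 0 */ ]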
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
 arch/arm/include/asm/highmem.h     |   6
 arch/arm/mm/highmem.c              |  23
 arch/frv/include/asm/highmem.h     |  25
 arch/frv/mb93090-mb00/pci-dma.c    |   4
 arch/frv/mm/cache-page.c           |   8
 arch/frv/mm/highmem.c              |  50
 arch/mips/include/asm/highmem.h    |  18
 arch/mips/mm/highmem.c             |  50
 arch/mn10300/include/asm/highmem.h |  42
 arch/powerpc/include/asm/highmem.h |   9
 arch/powerpc/mm/highmem.c          |  35
 arch/sparc/include/asm/highmem.h   |   4
 arch/sparc/mm/highmem.c            |  48
 arch/tile/include/asm/highmem.h    |  10
 arch/tile/mm/highmem.c             |  85
 arch/x86/include/asm/highmem.h     |  11
 arch/x86/include/asm/iomap.h       |   4
 arch/x86/kernel/crash_dump_32.c    |   2
 arch/x86/mm/highmem_32.c           |  75
 arch/x86/mm/iomap_32.c             |  42
 20 files changed, 294 insertions(+), 257 deletions(-)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 5aff58126602..1fc684e70ab6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1b..c00f119babbf 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
+	int type;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (kmap)
 		return kmap;
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;
 
 	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	}
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index cb4c317eaecc..a8d6565d415d 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	(void *) damlr;						  \
 })
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
 	pagefault_disable();
-	debug_kmap_atomic(type);
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	case 1:		return __kmap_atomic_primary(1, paddr, 3);
 	case 2:		return __kmap_atomic_primary(2, paddr, 4);
 	case 3:		return __kmap_atomic_primary(3, paddr, 5);
-	case 4:		return __kmap_atomic_primary(4, paddr, 6);
-	case 5:		return __kmap_atomic_primary(5, paddr, 7);
-	case 6:		return __kmap_atomic_primary(6, paddr, 8);
-	case 7:		return __kmap_atomic_primary(7, paddr, 9);
-	case 8:		return __kmap_atomic_primary(8, paddr, 10);
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		return __kmap_atomic_secondary(type - 9, paddr);
 
 	default:
 		BUG();
@@ -152,22 +143,13 @@ do {									  \
 	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	  \
 } while(0)
 
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
 	switch (type) {
 	case 0:		__kunmap_atomic_primary(0, 2);	break;
 	case 1:		__kunmap_atomic_primary(1, 3);	break;
 	case 2:		__kunmap_atomic_primary(2, 4);	break;
 	case 3:		__kunmap_atomic_primary(3, 5);	break;
-	case 4:		__kunmap_atomic_primary(4, 6);	break;
-	case 5:		__kunmap_atomic_primary(5, 7);	break;
-	case 6:		__kunmap_atomic_primary(6, 8);	break;
-	case 7:		__kunmap_atomic_primary(7, 9);	break;
-	case 8:		__kunmap_atomic_primary(8, 10);	break;
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		__kunmap_atomic_secondary(type - 9, kvaddr);
-		break;
 
 	default:
 		BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 85d110b71cf7..41098a3803a2 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	dampr2 = __get_DAMPR(2);
 
 	for (i = 0; i < nents; i++) {
-		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
 
 		frv_dcache_writeback((unsigned long) vaddr,
 				     (unsigned long) vaddr + PAGE_SIZE);
 
 	}
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
 		__set_IAMPR(2, dampr2);
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 0261cbe153b5..b24ade27a0f0 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
 	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
 	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
 	frv_cache_wback_inv(start, start + len);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index eadd07658075..61088dcc1594 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 {
 	return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(4, paddr, 6);
+	case 1:		return __kmap_atomic_primary(5, paddr, 7);
+	case 2:		return __kmap_atomic_primary(6, paddr, 8);
+	case 3:		return __kmap_atomic_primary(7, paddr, 9);
+	case 4:		return __kmap_atomic_primary(8, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx_pop();
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(4, 6);	break;
+	case 1:		__kunmap_atomic_primary(5, 7);	break;
+	case 2:		__kunmap_atomic_primary(6, 8);	break;
+	case 3:		__kunmap_atomic_primary(7, 9);	break;
+	case 4:		__kunmap_atomic_primary(8, 10);	break;
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 75753ca73bfd..77e644082a3b 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap			__kmap
-#define kunmap			__kunmap
-#define kmap_atomic		__kmap_atomic
-#define kunmap_atomic_notypecheck	__kunmap_atomic_notypecheck
-#define kmap_atomic_to_page	__kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	flush_cache_all()
 
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef11..1e69b1fb4b85 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index b0b187a29b88..f577ba2268ca 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
+	pagefault_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
@@ -91,26 +92,35 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
 	return vaddr;
 }
 
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+	int type;
 
-	if (vaddr < FIXADDR_START) /* FIXME */
+	if (vaddr < FIXADDR_START) { /* FIXME */
+		pagefault_enable();
 		return;
+	}
 
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-		BUG();
+	type = kmap_atomic_idx_pop();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(kmap_pte - idx);
-	__flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+	{
+		unsigned int idx;
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+			BUG();
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(kmap_pte - idx);
+		__flush_tlb_one(vaddr);
+	}
 #endif
+	pagefault_enable();
 }
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index d10d64a4be38..dbc264010d0b 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-			      pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap(struct page *page)
 {
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 857d4173f9c6..b0848b462bbc 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -29,17 +29,17 @@
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,33 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned int idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_page(NULL, vaddr);
+	}
 #endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index ec23b0a87b98..3d7afbb7f4bb 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
 #define flush_cache_kmaps()	flush_cache_all()
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index e139e9cbf5f7..5e50c09b7dce 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -29,17 +29,17 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	unsigned long idx;
 	unsigned long vaddr;
+	long idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
@@ -63,44 +63,50 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+	type = kmap_atomic_idx_pop();
 
-	/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned long idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+		/* XXX Fix - Anton */
 #if 0
 	__flush_cache_one(vaddr);
 #else
 	flush_cache_all();
 #endif
 
 	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
 	pte_clear(&init_mm, vaddr, kmap_pte-idx);
 	/* XXX Fix - Anton */
 #if 0
 	__flush_tlb_one(vaddr);
 #else
 	flush_tlb_all();
 #endif
+	}
 #endif
-
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index d155db6fa9bd..e0f7ee186721 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);
 
 #define flush_cache_kmaps()	do { } while (0)
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..8ef6595e162c 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
 * When holding an atomic kmap is is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
 	return kmap_atomic_prot(page, type, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it.  Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it.  Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8caac76ac324..3bd04022fd0c 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	do { } while (0)
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c4191b3b7056..363e33eb6ec1 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -27,10 +27,10 @@
 #include <asm/tlbflush.h>
 
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 67414550c3cc..d5cd13945d5a 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!is_crashed_pfn_valid(pfn))
 		return -EFAULT;
 
-	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+	vaddr = kmap_atomic_pfn(pfn);
 
 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12ef861..d723e369003c 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
 		return page_address(page);
 	return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
 		return;
 	kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
 	return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
-	else {
+	}
 #ifdef CONFIG_DEBUG_HIGHMEM
+	else {
 		BUG_ON(vaddr < PAGE_OFFSET);
 		BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
 	}
+#endif
 
 	pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);	/* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
 	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
 	return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 72fc70cf6184..75a3d7f24a2c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
 
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
 	io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 }
 
 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
 */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
+	}
 
 	pagefault_enable();
 }