 arch/arm/include/asm/highmem.h         |  6
 arch/arm/mm/highmem.c                  | 23
 arch/frv/include/asm/highmem.h         | 25
 arch/frv/mb93090-mb00/pci-dma.c        |  4
 arch/frv/mm/cache-page.c               |  8
 arch/frv/mm/highmem.c                  | 50
 arch/mips/include/asm/highmem.h        | 18
 arch/mips/mm/highmem.c                 | 50
 arch/mn10300/include/asm/highmem.h     | 42
 arch/powerpc/include/asm/highmem.h     |  9
 arch/powerpc/mm/highmem.c              | 35
 arch/sparc/include/asm/highmem.h       |  4
 arch/sparc/mm/highmem.c                | 48
 arch/tile/include/asm/highmem.h        | 10
 arch/tile/mm/highmem.c                 | 85
 arch/x86/include/asm/highmem.h         | 11
 arch/x86/include/asm/iomap.h           |  4
 arch/x86/kernel/crash_dump_32.c        |  2
 arch/x86/mm/highmem_32.c               | 75
 arch/x86/mm/iomap_32.c                 | 42
 drivers/gpu/drm/i915/i915_gem.c        | 25
 drivers/gpu/drm/i915/i915_irq.c        |  5
 drivers/gpu/drm/i915/intel_overlay.c   |  5
 drivers/gpu/drm/nouveau/nouveau_bios.c |  8
 drivers/gpu/drm/ttm/ttm_bo_util.c      |  8
 include/linux/highmem.h                | 61
 include/linux/io-mapping.h             | 14
 mm/highmem.c                           | 62
 28 files changed, 367 insertions(+), 372 deletions(-)
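
The change is one mechanical API conversion repeated across every architecture and driver below: kmap_atomic() and its variants lose the enum km_type argument, and the fixmap slot is instead taken from a small per-CPU stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop(), added to include/linux/highmem.h at the end of this diff). A minimal before/after sketch of a caller; the memcpy() and buf are illustrative, only the kmap_atomic()/kunmap_atomic() signatures come from this patch:

	/* before: the caller names a fixed slot and must pick a safe one */
	void *vaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);

	/* after: the slot index is pushed on map and popped on unmap */
	void *vaddr = kmap_atomic(page);
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);

The driver hunks below (i915, nouveau, ttm) are exactly this conversion at every call site.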
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 5aff58126602..1fc684e70ab6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif

diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1b..c00f119babbf 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
+	int type;

 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);

-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (kmap)
 		return kmap;

+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)

 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;

 	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	}
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;

 	pagefault_disable();

+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
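
The ARM conversion above is the template every other architecture follows: __kmap_atomic() claims the next per-CPU fixmap slot with kmap_atomic_idx_push(), and __kunmap_atomic() releases the most recent one with kmap_atomic_idx_pop(). Since the slots now form a stack, nested atomic mappings must be released in LIFO order; a sketch of the contract (the two pages are illustrative, not from this patch):

	void *src = kmap_atomic(s_page);
	void *dst = kmap_atomic(d_page);	/* pushed on top of src's slot */
	copy_page(dst, src);
	kunmap_atomic(dst);			/* pop the top slot first... */
	kunmap_atomic(src);			/* ...then the one beneath it */

Unmapping in the opposite order would pop the wrong index and clear a pte that is still in use.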
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index cb4c317eaecc..a8d6565d415d 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	(void *) damlr;	\
 })

-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
 	unsigned long paddr;

 	pagefault_disable();
-	debug_kmap_atomic(type);
 	paddr = page_to_phys(page);

 	switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	case 1:		return __kmap_atomic_primary(1, paddr, 3);
 	case 2:		return __kmap_atomic_primary(2, paddr, 4);
 	case 3:		return __kmap_atomic_primary(3, paddr, 5);
-	case 4:		return __kmap_atomic_primary(4, paddr, 6);
-	case 5:		return __kmap_atomic_primary(5, paddr, 7);
-	case 6:		return __kmap_atomic_primary(6, paddr, 8);
-	case 7:		return __kmap_atomic_primary(7, paddr, 9);
-	case 8:		return __kmap_atomic_primary(8, paddr, 10);
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		return __kmap_atomic_secondary(type - 9, paddr);

 	default:
 		BUG();
@@ -152,22 +143,13 @@ do { \
 	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)

-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
 	switch (type) {
 	case 0:		__kunmap_atomic_primary(0, 2);	break;
 	case 1:		__kunmap_atomic_primary(1, 3);	break;
 	case 2:		__kunmap_atomic_primary(2, 4);	break;
 	case 3:		__kunmap_atomic_primary(3, 5);	break;
-	case 4:		__kunmap_atomic_primary(4, 6);	break;
-	case 5:		__kunmap_atomic_primary(5, 7);	break;
-	case 6:		__kunmap_atomic_primary(6, 8);	break;
-	case 7:		__kunmap_atomic_primary(7, 9);	break;
-	case 8:		__kunmap_atomic_primary(8, 10);	break;
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		__kunmap_atomic_secondary(type - 9, kvaddr);
-		break;

 	default:
 		BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }

+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __KERNEL__ */
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 85d110b71cf7..41098a3803a2 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	dampr2 = __get_DAMPR(2);

 	for (i = 0; i < nents; i++) {
-		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);

 		frv_dcache_writeback((unsigned long) vaddr,
 				     (unsigned long) vaddr + PAGE_SIZE);

 	}

-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
 		__set_IAMPR(2, dampr2);
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 0261cbe153b5..b24ade27a0f0 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)

 	dampr2 = __get_DAMPR(2);

-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);

 	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);

-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);

 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,

 	dampr2 = __get_DAMPR(2);

-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);

 	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
 	frv_cache_wback_inv(start, start + len);

-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);

 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index eadd07658075..61088dcc1594 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 {
 	return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(4, paddr, 6);
+	case 1:		return __kmap_atomic_primary(5, paddr, 7);
+	case 2:		return __kmap_atomic_primary(6, paddr, 8);
+	case 3:		return __kmap_atomic_primary(7, paddr, 9);
+	case 4:		return __kmap_atomic_primary(8, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx_pop();
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(4, 6);	break;
+	case 1:		__kunmap_atomic_primary(5, 7);	break;
+	case 2:		__kunmap_atomic_primary(6, 8);	break;
+	case 3:		__kunmap_atomic_primary(7, 9);	break;
+	case 4:		__kunmap_atomic_primary(8, 10);	break;
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
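
frv is the one port that keeps a typed interface alongside the stack: its own cache-flush paths keep using the renamed kmap_atomic_primary()/kunmap_atomic_primary() with an explicit slot, while the new __kmap_atomic() above offsets the generic stack indices past the reserved primary maps (stack index 0 uses primary map 4) before spilling into the secondary TLB lines. Both styles as they appear in this diff; the memset() line is an illustrative stand-in for whatever short, non-sleeping access the caller does:

	/* arch-internal: fixed slot, km_type retained */
	vaddr = kmap_atomic_primary(page, __KM_CACHE);
	frv_dcache_writeback((unsigned long) vaddr,
			     (unsigned long) vaddr + PAGE_SIZE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);

	/* generic: slot chosen by kmap_atomic_idx_push() */
	vaddr = kmap_atomic(page);
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);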
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 75753ca73bfd..77e644082a3b 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);

-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap			__kmap
-#define kunmap			__kunmap
-#define kmap_atomic		__kmap_atomic
-#define kunmap_atomic_notypecheck __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page	__kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);

 #define flush_cache_kmaps()	flush_cache_all()

diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef11..1e69b1fb4b85 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;

 unsigned long highstart_pfn, highend_pfn;

-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;

@@ -21,16 +21,16 @@ void *__kmap(struct page *page)

 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);

-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);

 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */

-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;

 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);

-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);

-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;

 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}

-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();

-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;

 	pagefault_disable();

-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }

-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index b0b187a29b88..f577ba2268ca 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;

+	pagefault_disable();
 	if (page < highmem_start_page)
 		return page_address(page);

-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
@@ -91,26 +92,35 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
 	return vaddr;
 }

-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+	int type;

-	if (vaddr < FIXADDR_START) /* FIXME */
+	if (vaddr < FIXADDR_START) { /* FIXME */
+		pagefault_enable();
 		return;
+	}

-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-		BUG();
+	type = kmap_atomic_idx_pop();

-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(kmap_pte - idx);
-	__flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+	{
+		unsigned int idx;
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+			BUG();
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(kmap_pte - idx);
+		__flush_tlb_one(vaddr);
+	}
 #endif
+	pagefault_enable();
 }
-
 #endif /* __KERNEL__ */

 #endif /* _ASM_HIGHMEM_H */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index d10d64a4be38..dbc264010d0b 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;

 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-			      pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);

 static inline void *kmap(struct page *page)
 {
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }

-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot(page, kmap_prot);
 }

 static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 857d4173f9c6..b0848b462bbc 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -29,17 +29,17 @@
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;

 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);

-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,33 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;

 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
 		return;
 	}

-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();

-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned int idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_page(NULL, vaddr);
+	}
 #endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index ec23b0a87b98..3d7afbb7f4bb 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }

-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);

 #define flush_cache_kmaps()	flush_cache_all()
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index e139e9cbf5f7..5e50c09b7dce 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -29,17 +29,17 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	unsigned long idx;
 	unsigned long vaddr;
+	long idx, type;

 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);

-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

@@ -63,44 +63,50 @@ void *kmap_atomic(struct page *page, enum km_type type)

 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;

 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}

-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+	type = kmap_atomic_idx_pop();

-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned long idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+		/* XXX Fix - Anton */
 #if 0
 		__flush_cache_one(vaddr);
 #else
 		flush_cache_all();
 #endif

 		/*
 		 * force other mappings to Oops if they'll try to access
 		 * this pte without first remap it
 		 */
 		pte_clear(&init_mm, vaddr, kmap_pte-idx);
 		/* XXX Fix - Anton */
 #if 0
 		__flush_tlb_one(vaddr);
 #else
 		flush_tlb_all();
 #endif
+	}
 #endif
-
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index d155db6fa9bd..e0f7ee186721 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);

 #define flush_cache_kmaps()	do { } while (0)
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..8ef6595e162c 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);

-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
  * When holding an atomic kmap is is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;

 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);

-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);

-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
 	return kmap_atomic_prot(page, type, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it. Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it. Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);

 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }

 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8caac76ac324..3bd04022fd0c 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);

 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);

 #define flush_cache_kmaps()	do { } while (0)
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c4191b3b7056..363e33eb6ec1 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -27,10 +27,10 @@
 #include <asm/tlbflush.h>

 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);

 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);

 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 67414550c3cc..d5cd13945d5a 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!is_crashed_pfn_valid(pfn))
 		return -EFAULT;

-	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+	vaddr = kmap_atomic_pfn(pfn);

 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12ef861..d723e369003c 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
 		return page_address(page);
 	return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);

 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
 		return;
 	kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);

 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;

 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);

-	debug_kmap_atomic(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)

 	return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);

-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);

-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
-	else {
+	}
 #ifdef CONFIG_DEBUG_HIGHMEM
+	else {
 		BUG_ON(vaddr < PAGE_OFFSET);
 		BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
 	}
+#endif

 	pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);

 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
 	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
 	return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);

 void __init set_highmem_pages_init(void)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 72fc70cf6184..75a3d7f24a2c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);

-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
 	io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);

-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;

 	pagefault_disable();

-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 }

 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
  */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;

-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
+	}

 	pagefault_enable();
 }
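
Both x86 unmap paths above now recognize an atomic mapping by address range instead of trusting a caller-supplied type: because fixmap virtual addresses grow downward as the index grows, __fix_to_virt(FIX_KMAP_END) is the low end and __fix_to_virt(FIX_KMAP_BEGIN) the high end of the window, and anything outside it (e.g. a lowmem page_address()) falls through to pagefault_enable() (with sanity BUG_ON()s under CONFIG_DEBUG_HIGHMEM). A hedged usage sketch for the iomap side, with pfn, off, and v as illustrative values:

	void __iomem *p = iomap_atomic_prot_pfn(pfn, PAGE_KERNEL_WC);
	iowrite32(v, p + off);
	iounmap_atomic(p);	/* pops the per-CPU slot and flushes the pte */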
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90b1d6753b9d..eb6c473c6d1b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -155,11 +155,11 @@ fast_shmem_read(struct page **pages,
 	char __iomem *vaddr;
 	int unwritten;

-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
 	if (vaddr == NULL)
 		return -ENOMEM;
 	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-	kunmap_atomic(vaddr, KM_USER0);
+	kunmap_atomic(vaddr);

 	if (unwritten)
 		return -EFAULT;
@@ -509,10 +509,10 @@ fast_user_write(struct io_mapping *mapping,
 	char *vaddr_atomic;
 	unsigned long unwritten;

-	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
 						      user_data, length);
-	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+	io_mapping_unmap_atomic(vaddr_atomic);
 	if (unwritten)
 		return -EFAULT;
 	return 0;
@@ -551,11 +551,11 @@ fast_shmem_write(struct page **pages,
 	char __iomem *vaddr;
 	unsigned long unwritten;

-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
 	if (vaddr == NULL)
 		return -ENOMEM;
 	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-	kunmap_atomic(vaddr, KM_USER0);
+	kunmap_atomic(vaddr);

 	if (unwritten)
 		return -EFAULT;
@@ -3346,8 +3346,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		reloc_offset = obj_priv->gtt_offset + reloc->offset;
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						      (reloc_offset &
-						       ~(PAGE_SIZE - 1)),
-						      KM_USER0);
+						       ~(PAGE_SIZE - 1)));
 		reloc_entry = (uint32_t __iomem *)(reloc_page +
 						   (reloc_offset & (PAGE_SIZE - 1)));
 		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
@@ -3358,7 +3357,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			  readl(reloc_entry), reloc_val);
 #endif
 		writel(reloc_val, reloc_entry);
-		io_mapping_unmap_atomic(reloc_page, KM_USER0);
+		io_mapping_unmap_atomic(reloc_page);

 		/* The updated presumed offset for this entry will be
 		 * copied back out to the user.
@@ -4772,11 +4771,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	page_count = obj->size / PAGE_SIZE;

 	for (i = 0; i < page_count; i++) {
-		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+		char *dst = kmap_atomic(obj_priv->pages[i]);
 		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

 		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst, KM_USER0);
+		kunmap_atomic(dst);
 	}
 	drm_clflush_pages(obj_priv->pages, page_count);
 	drm_agp_chipset_flush(dev);
@@ -4833,11 +4832,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	page_count = obj->size / PAGE_SIZE;

 	for (i = 0; i < page_count; i++) {
-		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+		char *src = kmap_atomic(obj_priv->pages[i]);
 		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

 		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 	}

 	i915_gem_object_put_pages(obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..b80010f0c4c9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,

 		local_irq_save(flags);
 		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-					     reloc_offset,
-					     KM_IRQ0);
+					     reloc_offset);
 		memcpy_fromio(d, s, PAGE_SIZE);
-		io_mapping_unmap_atomic(s, KM_IRQ0);
+		io_mapping_unmap_atomic(s);
 		local_irq_restore(flags);

 		dst->pages[page] = d;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1d306a458be6..3264bbd47e65 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,8 +187,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over

 	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
 		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-						overlay->reg_bo->gtt_offset,
-						KM_USER0);
+						overlay->reg_bo->gtt_offset);

 		if (!regs) {
 			DRM_ERROR("failed to map overlay regs in GTT\n");
@@ -203,7 +202,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 {
 	if (OVERLAY_NONPHYSICAL(overlay->dev))
-		io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+		io_mapping_unmap_atomic(overlay->virt_addr);

 	overlay->virt_addr = NULL;

diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 974b0f8ae048..8fa339600fe3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2167,11 +2167,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,

 	if (off < pci_resource_len(dev->pdev, 1)) {
 		uint8_t __iomem *p =
-			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+			io_mapping_map_atomic_wc(fb, off & PAGE_MASK);

 		val = ioread32(p + (off & ~PAGE_MASK));

-		io_mapping_unmap_atomic(p, KM_USER0);
+		io_mapping_unmap_atomic(p);
 	}

 	return val;
@@ -2183,12 +2183,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
2183{ 2183{
2184 if (off < pci_resource_len(dev->pdev, 1)) { 2184 if (off < pci_resource_len(dev->pdev, 1)) {
2185 uint8_t __iomem *p = 2185 uint8_t __iomem *p =
2186 io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); 2186 io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
2187 2187
2188 iowrite32(val, p + (off & ~PAGE_MASK)); 2188 iowrite32(val, p + (off & ~PAGE_MASK));
2189 wmb(); 2189 wmb();
2190 2190
2191 io_mapping_unmap_atomic(p, KM_USER0); 2191 io_mapping_unmap_atomic(p);
2192 } 2192 }
2193} 2193}
2194 2194
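Both the peek and poke paths reduce to the same two-argument mapping call. A self-contained sketch of the read side (fb_peek32 is a hypothetical name mirroring peek_fb above):

	#include <linux/io.h>
	#include <linux/io-mapping.h>

	/* Hypothetical: read a dword at byte offset 'off' inside 'fb'. */
	static u32 fb_peek32(struct io_mapping *fb, unsigned long off)
	{
		u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
		u32 val = ioread32(p + (off & ~PAGE_MASK));

		io_mapping_unmap_atomic(p);
		return val;
	}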
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3451a82adba7..e8a73e65da69 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -170,7 +170,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
170 src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); 170 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
171 171
172#ifdef CONFIG_X86 172#ifdef CONFIG_X86
173 dst = kmap_atomic_prot(d, KM_USER0, prot); 173 dst = kmap_atomic_prot(d, prot);
174#else 174#else
175 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) 175 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
176 dst = vmap(&d, 1, 0, prot); 176 dst = vmap(&d, 1, 0, prot);
@@ -183,7 +183,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
183 memcpy_fromio(dst, src, PAGE_SIZE); 183 memcpy_fromio(dst, src, PAGE_SIZE);
184 184
185#ifdef CONFIG_X86 185#ifdef CONFIG_X86
186 kunmap_atomic(dst, KM_USER0); 186 kunmap_atomic(dst);
187#else 187#else
188 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) 188 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
189 vunmap(dst); 189 vunmap(dst);
@@ -206,7 +206,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
206 206
207 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); 207 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
208#ifdef CONFIG_X86 208#ifdef CONFIG_X86
209 src = kmap_atomic_prot(s, KM_USER0, prot); 209 src = kmap_atomic_prot(s, prot);
210#else 210#else
211 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) 211 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
212 src = vmap(&s, 1, 0, prot); 212 src = vmap(&s, 1, 0, prot);
@@ -219,7 +219,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
219 memcpy_toio(dst, src, PAGE_SIZE); 219 memcpy_toio(dst, src, PAGE_SIZE);
220 220
221#ifdef CONFIG_X86 221#ifdef CONFIG_X86
222 kunmap_atomic(src, KM_USER0); 222 kunmap_atomic(src);
223#else 223#else
224 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) 224 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
225 vunmap(src); 225 vunmap(src);
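kmap_atomic_prot() follows the same pattern on x86: the caller still supplies the pgprot, only the KM_* slot disappears. A sketch of the resulting copy path, assuming CONFIG_X86 (copy_io_to_page is hypothetical; 'src' points at ioremapped memory as in the hunk):

	#include <linux/highmem.h>
	#include <linux/io.h>

	/* Hypothetical x86-only helper: copy MMIO into page 'd' using 'prot'. */
	static void copy_io_to_page(void *src, struct page *d, pgprot_t prot)
	{
		void *dst = kmap_atomic_prot(d, prot);	/* slot argument removed */

		memcpy_fromio(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
	}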
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 283cd47bb34c..8a85ec109a3a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
28 28
29#include <asm/kmap_types.h> 29#include <asm/kmap_types.h>
30 30
31#ifdef CONFIG_DEBUG_HIGHMEM
32
33void debug_kmap_atomic(enum km_type type);
34
35#else
36
37static inline void debug_kmap_atomic(enum km_type type)
38{
39}
40
41#endif
42
43#ifdef CONFIG_HIGHMEM 31#ifdef CONFIG_HIGHMEM
44#include <asm/highmem.h> 32#include <asm/highmem.h>
45 33
@@ -49,6 +37,27 @@ extern unsigned long totalhigh_pages;
49 37
50void kmap_flush_unused(void); 38void kmap_flush_unused(void);
51 39
40DECLARE_PER_CPU(int, __kmap_atomic_idx);
41
42static inline int kmap_atomic_idx_push(void)
43{
44 int idx = __get_cpu_var(__kmap_atomic_idx)++;
45#ifdef CONFIG_DEBUG_HIGHMEM
46 WARN_ON_ONCE(in_irq() && !irqs_disabled());
47 BUG_ON(idx > KM_TYPE_NR);
48#endif
49 return idx;
50}
51
52static inline int kmap_atomic_idx_pop(void)
53{
54 int idx = --__get_cpu_var(__kmap_atomic_idx);
55#ifdef CONFIG_DEBUG_HIGHMEM
56 BUG_ON(idx < 0);
57#endif
58 return idx;
59}
60
52#else /* CONFIG_HIGHMEM */ 61#else /* CONFIG_HIGHMEM */
53 62
54static inline unsigned int nr_free_highpages(void) { return 0; } 63static inline unsigned int nr_free_highpages(void) { return 0; }
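These two helpers are the mechanical core of the series: each CPU keeps a depth counter and hands out kmap indices in stack order, so callers no longer pick a KM_* slot by hand. A sketch of the generic shape an architecture's __kmap_atomic() takes on top of them; kmap_pte and kmap_prot are arch-provided symbols, and real implementations add PTE-debug checks and lazy-TLB handling:

	void *__kmap_atomic(struct page *page)
	{
		unsigned long vaddr;
		int idx, type;

		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);

		type = kmap_atomic_idx_push();		/* reserve the next slot */
		idx = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

		return (void *)vaddr;
	}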
@@ -66,19 +75,19 @@ static inline void kunmap(struct page *page)
66{ 75{
67} 76}
68 77
69static inline void *kmap_atomic(struct page *page, enum km_type idx) 78static inline void *__kmap_atomic(struct page *page)
70{ 79{
71 pagefault_disable(); 80 pagefault_disable();
72 return page_address(page); 81 return page_address(page);
73} 82}
74#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) 83#define kmap_atomic_prot(page, prot) __kmap_atomic(page)
75 84
76static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx) 85static inline void __kunmap_atomic(void *addr)
77{ 86{
78 pagefault_enable(); 87 pagefault_enable();
79} 88}
80 89
81#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) 90#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
82#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 91#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
83 92
84#define kmap_flush_unused() do {} while(0) 93#define kmap_flush_unused() do {} while(0)
@@ -86,12 +95,20 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
86 95
87#endif /* CONFIG_HIGHMEM */ 96#endif /* CONFIG_HIGHMEM */
88 97
89/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */ 98/*
90/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */ 99 * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
91#define kunmap_atomic(addr, idx) do { \ 100 */
92 BUILD_BUG_ON(__same_type((addr), struct page *)); \ 101#define kmap_atomic(page, args...) __kmap_atomic(page)
93 kunmap_atomic_notypecheck((addr), (idx)); \ 102
94 } while (0) 103/*
104 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
105 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
106 */
107#define kunmap_atomic(addr, args...) \
108do { \
109 BUILD_BUG_ON(__same_type((addr), struct page *)); \
110 __kunmap_atomic(addr); \
111} while (0)
95 112
96/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 113/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
97#ifndef clear_user_highpage 114#ifndef clear_user_highpage
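The variadic wrapper exists purely so the tree can be converted without a flag day: unconverted callers that still pass a KM_* constant keep compiling, and the extra argument is ignored. Both forms below expand to the same __kmap_atomic(page) (compat_demo is hypothetical):

	#include <linux/highmem.h>

	static void compat_demo(struct page *page)
	{
		/* legacy form: KM_USER0 is swallowed by args... and ignored */
		void *a = kmap_atomic(page, KM_USER0);
		kunmap_atomic(a, KM_USER0);

		/* new form, same expansion */
		void *b = kmap_atomic(page);
		kunmap_atomic(b);
	}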
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 7fb592793738..8cdcc2a199ad 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping)
81/* Atomic map/unmap */ 81/* Atomic map/unmap */
82static inline void __iomem * 82static inline void __iomem *
83io_mapping_map_atomic_wc(struct io_mapping *mapping, 83io_mapping_map_atomic_wc(struct io_mapping *mapping,
84 unsigned long offset, 84 unsigned long offset)
85 int slot)
86{ 85{
87 resource_size_t phys_addr; 86 resource_size_t phys_addr;
88 unsigned long pfn; 87 unsigned long pfn;
@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
90 BUG_ON(offset >= mapping->size); 89 BUG_ON(offset >= mapping->size);
91 phys_addr = mapping->base + offset; 90 phys_addr = mapping->base + offset;
92 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); 91 pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
93 return iomap_atomic_prot_pfn(pfn, slot, mapping->prot); 92 return iomap_atomic_prot_pfn(pfn, mapping->prot);
94} 93}
95 94
96static inline void 95static inline void
97io_mapping_unmap_atomic(void __iomem *vaddr, int slot) 96io_mapping_unmap_atomic(void __iomem *vaddr)
98{ 97{
99 iounmap_atomic(vaddr, slot); 98 iounmap_atomic(vaddr);
100} 99}
101 100
102static inline void __iomem * 101static inline void __iomem *
@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping)
137/* Atomic map/unmap */ 136/* Atomic map/unmap */
138static inline void __iomem * 137static inline void __iomem *
139io_mapping_map_atomic_wc(struct io_mapping *mapping, 138io_mapping_map_atomic_wc(struct io_mapping *mapping,
140 unsigned long offset, 139 unsigned long offset)
141 int slot)
142{ 140{
143 return ((char __force __iomem *) mapping) + offset; 141 return ((char __force __iomem *) mapping) + offset;
144} 142}
145 143
146static inline void 144static inline void
147io_mapping_unmap_atomic(void __iomem *vaddr, int slot) 145io_mapping_unmap_atomic(void __iomem *vaddr)
148{ 146{
149} 147}
150 148
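With the slot gone from both the atomic-iomap variant and the plain-ioremap fallback above, callers need no arch knowledge at all. An end-to-end sketch under assumed values (probe_first_dword is hypothetical; 'base' and 'size' are taken to describe a WC-mappable BAR):

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/io-mapping.h>

	/* Hypothetical: read the first dword of a BAR through an atomic WC map. */
	static int probe_first_dword(resource_size_t base, unsigned long size,
				     u32 *out)
	{
		struct io_mapping *map = io_mapping_create_wc(base, size);
		void __iomem *p;

		if (!map)
			return -ENOMEM;

		p = io_mapping_map_atomic_wc(map, 0);	/* identical on every arch */
		*out = ioread32(p);
		io_mapping_unmap_atomic(p);

		io_mapping_free(map);
		return 0;
	}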
diff --git a/mm/highmem.c b/mm/highmem.c
index 7a0aa1be4993..781e754a75ac 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -42,6 +42,10 @@
42unsigned long totalhigh_pages __read_mostly; 42unsigned long totalhigh_pages __read_mostly;
43EXPORT_SYMBOL(totalhigh_pages); 43EXPORT_SYMBOL(totalhigh_pages);
44 44
45
46DEFINE_PER_CPU(int, __kmap_atomic_idx);
47EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
48
45unsigned int nr_free_highpages (void) 49unsigned int nr_free_highpages (void)
46{ 50{
47 pg_data_t *pgdat; 51 pg_data_t *pgdat;
@@ -422,61 +426,3 @@ void __init page_address_init(void)
422} 426}
423 427
424#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */ 428#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
425
426#ifdef CONFIG_DEBUG_HIGHMEM
427
428void debug_kmap_atomic(enum km_type type)
429{
430 static int warn_count = 10;
431
432 if (unlikely(warn_count < 0))
433 return;
434
435 if (unlikely(in_interrupt())) {
436 if (in_nmi()) {
437 if (type != KM_NMI && type != KM_NMI_PTE) {
438 WARN_ON(1);
439 warn_count--;
440 }
441 } else if (in_irq()) {
442 if (type != KM_IRQ0 && type != KM_IRQ1 &&
443 type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
444 type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
445 WARN_ON(1);
446 warn_count--;
447 }
448 } else if (!irqs_disabled()) { /* softirq */
449 if (type != KM_IRQ0 && type != KM_IRQ1 &&
450 type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
451 type != KM_SKB_SUNRPC_DATA &&
452 type != KM_SKB_DATA_SOFTIRQ &&
453 type != KM_BOUNCE_READ) {
454 WARN_ON(1);
455 warn_count--;
456 }
457 }
458 }
459
460 if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
461 type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
462 type == KM_IRQ_PTE || type == KM_NMI ||
463 type == KM_NMI_PTE ) {
464 if (!irqs_disabled()) {
465 WARN_ON(1);
466 warn_count--;
467 }
468 } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
469 if (irq_count() == 0 && !irqs_disabled()) {
470 WARN_ON(1);
471 warn_count--;
472 }
473 }
474#ifdef CONFIG_KGDB_KDB
475 if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
476 WARN_ON(1);
477 warn_count--;
478 }
479#endif /* CONFIG_KGDB_KDB */
480}
481
482#endif