Diffstat (limited to 'arch/arm/mm/highmem.c')
 arch/arm/mm/highmem.c | 111 ++++++---------------------------------------
 1 file changed, 15 insertions(+), 96 deletions(-)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1b..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
+	int type;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (kmap)
 		return kmap;
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
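With the km_type argument gone, the fixmap slot is no longer chosen by the
caller: __kmap_atomic() pushes a fresh per-CPU index and __kunmap_atomic()
peeks at and pops it on the way out. A minimal sketch of that stack
discipline, using illustrative names and a plain counter rather than the
kernel's actual per-CPU implementation:

	/* Sketch only, not the kernel code: one depth counter per CPU
	 * tracks how many atomic kmaps are live, and doubles as the
	 * next free fixmap slot index. */
	static int kmap_depth;		/* per-CPU in the real kernel */

	static int idx_push(void)	/* cf. kmap_atomic_idx_push() */
	{
		return kmap_depth++;	/* claim the next free slot */
	}

	static int idx_peek(void)	/* cf. kmap_atomic_idx() */
	{
		return kmap_depth - 1;	/* innermost live mapping */
	}

	static void idx_pop(void)	/* cf. kmap_atomic_idx_pop() */
	{
		kmap_depth--;		/* release the innermost slot */
	}

Because atomic kmaps strictly nest (a mapping taken in an interrupt is
always released before the interrupted mapping is), a per-CPU stack index
suffices and callers no longer need to coordinate over fixed slots.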
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;
 
 	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 #else
 		(void) idx;	/* to kill a warning */
 #endif
+		kmap_atomic_idx_pop();
 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
 		/* this address was obtained through kmap_high_get() */
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
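For callers the conversion is mechanical: the slot argument disappears.
Assuming the generic kmap_atomic()/kunmap_atomic() wrappers from the same
series (which dispatch to the __-prefixed arch hooks above), a typical
copy changes roughly as follows; KM_USER0, buf and len are illustrative:

	/* before: the caller names a fixed slot, and must pass the
	 * same slot to both the map and the unmap */
	void *p = kmap_atomic(page, KM_USER0);
	memcpy(p, buf, len);
	kunmap_atomic(p, KM_USER0);

	/* after: the slot is tracked internally on the per-CPU stack */
	void *p = kmap_atomic(page);
	memcpy(p, buf, len);
	kunmap_atomic(p);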
@@ -134,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupts disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allows it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */
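The deleted kmap_high_l1_vipt()/kunmap_high_l1_vipt() pair existed so VIPT
cache maintenance could nest into interrupt context: the mapper handed back
the previous fixmap pte and the unmapper restored it, giving the reentrancy
described in the removed comment. Its usage contract was roughly the sketch
below, where the flush call stands in for whatever maintenance the caller
performs; with the stack-based atomic kmaps above, that nesting now comes
for free, which is presumably why the dedicated helpers can go:

	pte_t saved_pte;
	void *vaddr;

	vaddr = kmap_high_l1_vipt(page, &saved_pte);	/* may nest in IRQ */
	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);	/* cache maintenance */
	kunmap_high_l1_vipt(page, saved_pte);		/* restore outer pte */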