author     Nicolas Pitre <nicolas.pitre@linaro.org>    2010-12-15 15:14:45 -0500
committer  Nicolas Pitre <nico@fluxnic.net>            2010-12-19 12:56:46 -0500
commit     39af22a79232373764904576f31572f1db76af10
tree       20d31ab6a01b93dabe9c05277bcb1413e978648b /arch/arm/mm/highmem.c
parent     b0c3844d8af6b9f3f18f31e1b0502fbefa2166be
ARM: get rid of kmap_high_l1_vipt()
Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is no longer
necessary to carry the ad hoc version of kmap_atomic() that commit
7e5a69e83b "ARM: 6007/1: fix highmem with VIPT cache and DMA" added to
cope with reentrancy.

In fact, relying on fixed kmap type indices (namely KM_L1_CACHE) is now
actively wrong: kmap_atomic() ignores them entirely, and a concurrent
instance of it may reuse any slot for any purpose.
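To illustrate the caller-side conversion (a sketch, not code from this
commit: flush_one_page() is a hypothetical caller and the actual cache
maintenance is elided):

	#include <linux/highmem.h>

	/*
	 * Before, a caller had to juggle the ad hoc API:
	 *
	 *	pte_t saved_pte;
	 *	void *kaddr = kmap_high_l1_vipt(page, &saved_pte);
	 *	... cache maintenance on kaddr ...
	 *	kunmap_high_l1_vipt(page, saved_pte);
	 *
	 * After the stack-based rework, plain kmap_atomic() nests
	 * safely on its own, even from interrupt context:
	 */
	static void flush_one_page(struct page *page)
	{
		void *kaddr = kmap_atomic(page);
		/* ... cache maintenance on kaddr ... */
		kunmap_atomic(kaddr);
	}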
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Diffstat (limited to 'arch/arm/mm/highmem.c')
-rw-r--r--  arch/arm/mm/highmem.c | 87 ----
1 file changed, 0 insertions(+), 87 deletions(-)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da9..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupts disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allows it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif	/* CONFIG_CPU_CACHE_VIPT */
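
For contrast with the fixed KM_L1_CACHE slot removed above, here is a
condensed sketch of how the stack-based scheme from 3e4d3af501 picks a
slot on ARM (kmap_atomic_idx_push() and the other helpers named are
real, but this is a simplified illustration, not the verbatim kernel
source):

	#include <linux/highmem.h>
	#include <asm/fixmap.h>

	static void *kmap_atomic_sketch(struct page *page)
	{
		unsigned long vaddr;
		int idx, type;

		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);

		/* Next free per-CPU slot from a stack, not a fixed KM_* index. */
		type = kmap_atomic_idx_push();
		idx = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
		local_flush_tlb_kernel_page(vaddr);

		return (void *)vaddr;
	}

This is what makes a fixed index actively wrong: a nested, e.g.
interrupt-context, kmap_atomic() pushes onto the same per-CPU stack and
may be handed the very slot that KM_L1_CACHE used to name.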