author		Nicolas Pitre <nicolas.pitre@linaro.org>	2010-12-15 15:14:45 -0500
committer	Nicolas Pitre <nico@fluxnic.net>	2010-12-19 12:56:46 -0500
commit		39af22a79232373764904576f31572f1db76af10 (patch)
tree		20d31ab6a01b93dabe9c05277bcb1413e978648b
parent		b0c3844d8af6b9f3f18f31e1b0502fbefa2166be (diff)
ARM: get rid of kmap_high_l1_vipt()
Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is no longer
necessary to carry an ad hoc version of kmap_atomic() added in commit
7e5a69e83b "ARM: 6007/1: fix highmem with VIPT cache and DMA" to cope
with reentrancy.
In fact, it is now actively wrong to rely on fixed kmap type indices
(namely KM_L1_CACHE) as kmap_atomic() totally ignores them now and a
concurrent instance of it may reuse any slot for any purpose.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
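
For context, commit 3e4d3af501 made kmap_atomic() allocate fixmap slots from a
per-CPU stack instead of per-caller fixed indices. A schematic sketch of why
nesting now just works (page1, page2, buf and len are placeholders, not code
from this patch):

	void *a = kmap_atomic(page1);	/* outer user: gets slot 0 */
	/* ... interrupt arrives ... */
	void *b = kmap_atomic(page2);	/* nested user: gets slot 1 */
	/* ... cache maintenance through b ... */
	kunmap_atomic(b);		/* slot 1 popped */
	/* ... interrupt returns ... */
	memcpy(a, buf, len);		/* slot 0 was never touched */
	kunmap_atomic(a);

Because any nesting level may land in any slot, code that hard-codes a slot
index such as KM_L1_CACHE can be silently overwritten by a concurrent user,
which is exactly why the ad hoc variant removed below has to go.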
-rw-r--r--	arch/arm/include/asm/highmem.h	 3
-rw-r--r--	arch/arm/mm/dma-mapping.c	 7
-rw-r--r--	arch/arm/mm/flush.c	 7
-rw-r--r--	arch/arm/mm/highmem.c	87
4 files changed, 8 insertions(+), 96 deletions(-)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 1fc684e70ab6..7080e2c8fa62 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ac6a36142fcd..809f1bf9fa29 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				op(vaddr, len, dir);
 				kunmap_high(page);
 			} else if (cache_is_vipt()) {
-				pte_t saved_pte;
-				vaddr = kmap_high_l1_vipt(page, &saved_pte);
+				/* unmapped pages might still be cached */
+				vaddr = kmap_atomic(page);
 				op(vaddr + offset, len, dir);
-				kunmap_high_l1_vipt(page, saved_pte);
+				kunmap_atomic(vaddr);
 			}
 		} else {
 			vaddr = page_address(page) + offset;
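
The conversion pattern, pulled together from the hunk above (a condensed
sketch with the declarations collapsed for readability; op, offset, len and
dir are as in dma_cache_maint_page()):

	/* old: reserve the fixed KM_L1_CACHE slot, saving its PTE */
	pte_t saved_pte;
	vaddr = kmap_high_l1_vipt(page, &saved_pte);
	op(vaddr + offset, len, dir);
	kunmap_high_l1_vipt(page, saved_pte);

	/* new: grab the next per-CPU slot; nothing to save or restore */
	vaddr = kmap_atomic(page);
	op(vaddr + offset, len, dir);
	kunmap_atomic(vaddr);

kmap_atomic() disables page faults (and with them preemption) itself and is
usable from interrupt context, so it is a drop-in replacement on both the DMA
maintenance path here and the cache-flush path below.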
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae75098..c29f2839f1d2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			addr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			addr = kmap_atomic(page);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(addr);
 		}
 	}
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da9..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif	/* CONFIG_CPU_CACHE_VIPT */
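
For comparison, a simplified sketch of the stack-based scheme that replaces
all of the above (modeled on commit 3e4d3af501, not copied from it; names
suffixed _sketch are hypothetical, and the debug checks and lowmem fast path
of the real code are elided):

	#include <linux/highmem.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, kmap_idx_sketch);	/* hypothetical */

	void *kmap_atomic_sketch(struct page *page)
	{
		int type, idx;
		unsigned long vaddr;

		pagefault_disable();	/* also disables preemption */
		type = __get_cpu_var(kmap_idx_sketch)++;	/* push a slot */
		idx = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
		local_flush_tlb_kernel_page(vaddr);
		return (void *)vaddr;
	}

	void kunmap_atomic_sketch(void *kvaddr)
	{
		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);	/* demap the slot */
		local_flush_tlb_kernel_page(vaddr);
		__get_cpu_var(kmap_idx_sketch)--;	/* pop the slot */
		pagefault_enable();
	}

The per-CPU counter is all the bookkeeping that remains: each nesting level
pushes the next free fixmap slot and pops it on unmap, so the saved_pte/depth
dance above, together with its fixed KM_L1_CACHE reservation, becomes dead
weight once every caller goes through this path.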