author		Nicolas Pitre <nicolas.pitre@linaro.org>	2010-12-16 14:56:34 -0500
committer	Nicolas Pitre <nico@fluxnic.net>		2010-12-19 12:57:16 -0500
commit		6d3e6d3640052cac958d61c44597cc216f6ee09f (patch)
tree		ba246fb50f8d637a0c6074211ddc2e74c4eb1ebe
parent		25cbe45440ea89a3b0f6f7ed326d3d476d53068b (diff)
ARM: fix cache-feroceon-l2 after stack based kmap_atomic()
Since commit 3e4d3af501 "mm: stack based kmap_atomic()", it is actively wrong to rely on fixed kmap type indices (namely KM_L2_CACHE): kmap_atomic() now ignores them entirely, and a concurrent instance of it may happily reuse any slot for any purpose. Because kmap_atomic() is now able to deal with reentrancy, we can get rid of the ad hoc mapping here.

While the code becomes much simpler, the use of __kunmap_atomic() does introduce a needless cache flush. It is not clear whether removing that flush would bring a performance gain worth the code-maintenance cost (there probably aren't that many highmem users on this platform anyway), but that should be reconsidered when/if someone cares enough to do some measurements.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
-rw-r--r--	arch/arm/mm/cache-feroceon-l2.c		37
1 file changed, 19 insertions(+), 18 deletions(-)
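
To see why the fixed-index scheme is "actively wrong" after 3e4d3af501, here is a minimal sketch of the two slot-selection schemes (illustrative only, not code from this patch; KM_L2_CACHE, KM_TYPE_NR, FIX_KMAP_BEGIN and kmap_atomic_idx_push() are taken from the kernel headers of that era):

	/* Old scheme: every user hard-codes "its own" slot per CPU. */
	idx   = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/*
	 * New scheme: kmap_atomic() pops the next free slot off a
	 * per-CPU stack and ignores any type argument, so a nested
	 * user (e.g. an interrupt handler) may legitimately be handed
	 * the very slot the old code assumed was reserved for
	 * KM_L2_CACHE.
	 */
	idx   = kmap_atomic_idx_push() + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);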
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e9..e0b0e7a4ec68 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
  * between which we don't want to be preempted.
  */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
 	/*
-	 * Let's do our own fixmap stuff in a minimal way here.
 	 * Because range ops can't be done on physical addresses,
 	 * we simply install a virtual mapping for it only for the
 	 * TLB lookup to occur, hence no need to flush the untouched
-	 * memory mapping. This is protected with the disabling of
-	 * interrupts by the caller.
+	 * memory mapping afterwards (note: a cache flush may happen
+	 * in some circumstances depending on the path taken in kunmap_atomic).
 	 */
-	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	local_flush_tlb_kernel_page(vaddr);
-	return vaddr + (paddr & ~PAGE_MASK);
+	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
 	return __phys_to_virt(paddr);
 #endif
 }
 
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+	kunmap_atomic((void *)vaddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 	 */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 	 */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)
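
For reference, the pattern both range helpers now follow, condensed from the hunks above (a restatement of the patched code, not an addition to it): the mapping is taken before interrupts are disabled and released after they are re-enabled, so only the two back-to-back cp15 writes run with interrupts off, and any cache flush performed by kunmap_atomic() happens outside the critical section:

	va_start = l2_get_va(start);		/* kmap_atomic_pfn() under CONFIG_HIGHMEM */
	va_end = va_start + (end - start);	/* same page, per the BUG_ON above */
	raw_local_irq_save(flags);		/* the two MCRs must not be interleaved */
	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"	/* set range start */
		"mcr p15, 1, %1, c15, c9, 5"		/* set range end */
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
	l2_put_va(va_start);			/* kunmap_atomic(); may flush */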