Diffstat (limited to 'arch/arm/mm/cache-xsc3l2.c')

-rw-r--r--  arch/arm/mm/cache-xsc3l2.c | 107
1 file changed, 80 insertions(+), 27 deletions(-)
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 464de893a988..5d180cb0bd94 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,12 +17,14 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include "mm.h"
 
 #define CR_L2 (1 << 26)
 
@@ -47,21 +49,11 @@ static inline void xsc3_l2_clean_mva(unsigned long addr)
         __asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
 }
 
-static inline void xsc3_l2_clean_pa(unsigned long addr)
-{
-        xsc3_l2_clean_mva(__phys_to_virt(addr));
-}
-
 static inline void xsc3_l2_inv_mva(unsigned long addr)
 {
         __asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
 }
 
-static inline void xsc3_l2_inv_pa(unsigned long addr)
-{
-        xsc3_l2_inv_mva(__phys_to_virt(addr));
-}
-
 static inline void xsc3_l2_inv_all(void)
 {
         unsigned long l2ctype, set_way;
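The deleted xsc3_l2_clean_pa()/xsc3_l2_inv_pa() wrappers translated physical addresses with __phys_to_virt(), which is only meaningful for lowmem: a highmem physical address has no permanent kernel mapping, so the computed virtual address would point nowhere useful. A minimal sketch of the precondition those helpers silently relied on; can_use_phys_to_virt() is a hypothetical name for illustration, not a kernel API:

#include <linux/mm.h>   /* pfn_valid(), pfn_to_page(), PageHighMem() */

/* Hypothetical illustration of the old helpers' hidden assumption. */
static inline int can_use_phys_to_virt(unsigned long pa)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        /* __phys_to_virt() yields a mapped address only for lowmem pages */
        return pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn));
}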
@@ -79,50 +71,103 @@ static inline void xsc3_l2_inv_all(void)
         dsb();
 }
 
+#ifdef CONFIG_HIGHMEM
+#define l2_map_save_flags(x)            raw_local_save_flags(x)
+#define l2_map_restore_flags(x)         raw_local_irq_restore(x)
+#else
+#define l2_map_save_flags(x)            ((x) = 0)
+#define l2_map_restore_flags(x)         ((void)(x))
+#endif
+
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
+                                      unsigned long flags)
+{
+#ifdef CONFIG_HIGHMEM
+        unsigned long va = prev_va & PAGE_MASK;
+        unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
+        if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
+                /*
+                 * Switching to a new page.  Because cache ops are
+                 * using virtual addresses only, we must put a mapping
+                 * in place for it.  We also enable interrupts for a
+                 * short while and disable them again to protect this
+                 * mapping.
+                 */
+                unsigned long idx;
+                raw_local_irq_restore(flags);
+                idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+                va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+                raw_local_irq_restore(flags | PSR_I_BIT);
+                set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
+                local_flush_tlb_kernel_page(va);
+        }
+        return va + (pa_offset >> (32 - PAGE_SHIFT));
+#else
+        return __phys_to_virt(pa);
+#endif
+}
+
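A note on the trick in l2_map_va(): shifting left by (32 - PAGE_SHIFT) discards the page frame number and leaves only the offset-within-page bits at the top of the word. Since every caller walks addresses upward one cache line at a time, that shifted offset can only compare lower than the previous one when a page boundary has been crossed. A worked example, assuming 32-bit unsigned long and PAGE_SHIFT == 12 (4 KB pages):

/*
 * Worked example (assumes 32-bit longs, PAGE_SHIFT == 12):
 *
 *   prev_va = 0xffe00fe0  ->  prev_va << 20 = 0xfe000000
 *   pa      = 0x20001000  ->  pa << 20      = 0x00000000
 *
 * 0x00000000 < 0xfe000000: the offset wrapped, so 'pa' starts a new
 * page and a fresh fixmap PTE is installed.  Within the same page the
 * shifted offset only grows, so no remapping is done:
 *
 *   pa      = 0x20001020  ->  pa << 20      = 0x02000000   (no wrap)
 *
 * The return value splices the page offset of 'pa' onto the page base
 * of 'va': va + (pa_offset >> 20).  The callers' initial vaddr = -1
 * shifts to 0xfff00000, which no line-aligned address can reach, so
 * the first call always installs a mapping.
 */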
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
+        unsigned long vaddr, flags;
+
         if (start == 0 && end == -1ul) {
                 xsc3_l2_inv_all();
                 return;
         }
 
+        vaddr = -1;                /* to force the first mapping */
+        l2_map_save_flags(flags);
+
         /*
          * Clean and invalidate partial first cache line.
          */
         if (start & (CACHE_LINE_SIZE - 1)) {
-                xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1));
-                xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+                vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+                xsc3_l2_clean_mva(vaddr);
+                xsc3_l2_inv_mva(vaddr);
                 start = (start | (CACHE_LINE_SIZE - 1)) + 1;
         }
 
         /*
-         * Clean and invalidate partial last cache line.
+         * Invalidate all full cache lines between 'start' and 'end'.
          */
-        if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
-                xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
-                xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
-                end &= ~(CACHE_LINE_SIZE - 1);
+        while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
+                vaddr = l2_map_va(start, vaddr, flags);
+                xsc3_l2_inv_mva(vaddr);
+                start += CACHE_LINE_SIZE;
         }
 
         /*
-         * Invalidate all full cache lines between 'start' and 'end'.
+         * Clean and invalidate partial last cache line.
          */
-        while (start < end) {
-                xsc3_l2_inv_pa(start);
-                start += CACHE_LINE_SIZE;
+        if (start < end) {
+                vaddr = l2_map_va(start, vaddr, flags);
+                xsc3_l2_clean_mva(vaddr);
+                xsc3_l2_inv_mva(vaddr);
         }
 
+        l2_map_restore_flags(flags);
+
         dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
+        unsigned long vaddr, flags;
+
+        vaddr = -1;                /* to force the first mapping */
+        l2_map_save_flags(flags);
+
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
-                xsc3_l2_clean_pa(start);
+                vaddr = l2_map_va(start, vaddr, flags);
+                xsc3_l2_clean_mva(vaddr);
                 start += CACHE_LINE_SIZE;
         }
 
+        l2_map_restore_flags(flags);
+
         dsb();
 }
 
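The reordering in xsc3_l2_inv_range() is worth noting: the old code pre-trimmed the partial last line before the main loop, while the new code walks full lines up to the aligned end and only then handles the tail. That keeps 'start' moving strictly upward, which is exactly what l2_map_va()'s wrap detection requires. A worked example, assuming CACHE_LINE_SIZE == 32 (its value earlier in this file, outside the hunks shown):

/*
 * Worked example (assumes CACHE_LINE_SIZE == 32):
 *
 *   xsc3_l2_inv_range(0x1010, 0x1070);
 *
 *   0x1000: partial head -> clean + invalidate (bytes 0x1000-0x100f
 *           lie outside the range and must not be lost)
 *   0x1020: full line    -> invalidate only
 *   0x1040: full line    -> invalidate only
 *   0x1060: partial tail -> clean + invalidate (preserves 0x1070-0x107f)
 *
 * 'start' advances monotonically, so l2_map_va() remaps at most once
 * per page crossed.
 */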
@@ -148,18 +193,26 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
+        unsigned long vaddr, flags;
+
         if (start == 0 && end == -1ul) {
                 xsc3_l2_flush_all();
                 return;
         }
 
+        vaddr = -1;                /* to force the first mapping */
+        l2_map_save_flags(flags);
+
         start &= ~(CACHE_LINE_SIZE - 1);
         while (start < end) {
-                xsc3_l2_clean_pa(start);
-                xsc3_l2_inv_pa(start);
+                vaddr = l2_map_va(start, vaddr, flags);
+                xsc3_l2_clean_mva(vaddr);
+                xsc3_l2_inv_mva(vaddr);
                 start += CACHE_LINE_SIZE;
         }
 
+        l2_map_restore_flags(flags);
+
         dsb();
 }
 
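None of these range functions is called directly by generic code: elsewhere in cache-xsc3l2.c (outside the hunks shown), an initcall registers them with the ARM outer-cache interface. The sketch below is paraphrased from memory of that file and may differ in detail from the actual source:

/* Paraphrased sketch, not a verbatim quote of cache-xsc3l2.c: */
static int __init xsc3_l2_init(void)
{
        if (!cpu_is_xsc3() || !xsc3_l2_present())
                return 0;

        if (get_cr() & CR_L2) {
                pr_info("XScale3 L2 cache enabled.\n");
                xsc3_l2_inv_all();

                outer_cache.inv_range = xsc3_l2_inv_range;
                outer_cache.clean_range = xsc3_l2_clean_range;
                outer_cache.flush_range = xsc3_l2_flush_range;
        }

        return 0;
}
core_initcall(xsc3_l2_init);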