Diffstat (limited to 'arch/microblaze/kernel/cpu/cache.c')
 arch/microblaze/kernel/cpu/cache.c | 255 +++++++++++++++++++++++++++++++++++
 1 file changed, 255 insertions(+), 0 deletions(-)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
new file mode 100644
index 000000000000..af866a450125
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -0,0 +1,255 @@
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/kernel.h>	/* for min() */
#include <linux/irqflags.h>	/* for local_irq_save()/local_irq_restore() */
#include <linux/cache.h>
#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

/* Exported functions */

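/*
 * Cache enable/disable works by flipping the ICE/DCE bits in the
 * MicroBlaze Machine Status Register (MSR).  When the core is built
 * with the MSR instructions (C_USE_MSR_INSTR), msrset/msrclr update
 * the MSR bits in a single instruction; otherwise we fall back to a
 * read-modify-write sequence through mfs/mts, with r12 as scratch.
 * The trailing nop gives the MSR update time to take effect before
 * the next instruction issues.
 */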
void _enable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrset	r0, %0;		\
				nop; "
				:
				: "i" (MSR_ICE)
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "
				:
				: "i" (MSR_ICE)
				: "memory", "r12");
#endif
	}
}

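/*
 * Clearing MSR_ICE only stops icache lookups; it does not discard
 * line contents, which is what the invalidate routines below are for.
 */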
void _disable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrclr	r0, %0;		\
				nop; "
				:
				: "i" (MSR_ICE)
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "
				:
				: "i" (MSR_ICE)
				: "memory", "r12");
#endif
	}
}

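/*
 * wic (Write to Instruction Cache) invalidates the icache line that
 * holds the given address; r0 supplies the unused second operand.
 */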
void _invalidate_icache(unsigned int addr)
{
	if (cpuinfo.use_icache) {
		__asm__ __volatile__ ("			\
				wic	%0, r0"
				:
				: "r" (addr));
	}
}

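/* The data cache variants below mirror the icache ones, using MSR_DCE. */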
void _enable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrset	r0, %0;		\
				nop; "
				:
				: "i" (MSR_DCE)
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "
				:
				: "i" (MSR_DCE)
				: "memory", "r12");
#endif
	}
}

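/*
 * Note: unlike _disable_icache(), there is no cpuinfo.use_dcache check
 * here; the callers in this file only reach it from paths already
 * guarded by cpuinfo.use_dcache.
 */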
void _disable_dcache(void)
{
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	__asm__ __volatile__ ("				\
			msrclr	r0, %0;			\
			nop; "
			:
			: "i" (MSR_DCE)
			: "memory");
#else
	__asm__ __volatile__ ("				\
			mfs	r12, rmsr;		\
			nop;				\
			andi	r12, r12, ~%0;		\
			mts	rmsr, r12;		\
			nop; "
			:
			: "i" (MSR_DCE)
			: "memory", "r12");
#endif
}

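/*
 * wdc (Write to Data Cache) invalidates the dcache line that holds
 * the given address.  As above, callers are expected to have checked
 * cpuinfo.use_dcache first.
 */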
void _invalidate_dcache(unsigned int addr)
{
	__asm__ __volatile__ ("				\
			wdc	%0, r0"
			:
			: "r" (addr));
}

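/*
 * Invalidate every icache line.  The cache is turned off (with IRQs
 * masked) while we walk it so that instruction fetches cannot
 * repopulate lines behind us; since lines are selected by the low
 * address bits, stepping from 0 to the cache size in line-sized
 * increments touches every line.
 */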
void __invalidate_icache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_icache) {
		local_irq_save(flags);
		__disable_icache();

		/*
		 * Just loop through cache size and invalidate;
		 * no need to add the CACHE_BASE address.
		 */
		for (i = 0; i < cpuinfo.icache_size;
				i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}

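/*
 * Range invalidate.  If the requested range is larger than the cache,
 * invalidating icache_size bytes from 'start' already covers every
 * line, hence the min() clamp.  For example, with a 16-byte line,
 * start 0x1005 rounds down to 0x1000 and end 0x1011 rounds up to
 * 0x1020, covering the two lines the range touches.
 */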
void __invalidate_icache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned long flags;
	unsigned int align;

	if (cpuinfo.use_icache) {
		/*
		 * No need to cover the entire cache range,
		 * just cover the cache footprint.
		 */
		end = min(start + cpuinfo.icache_size, end);
		align = ~(cpuinfo.icache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.icache_line);

		local_irq_save(flags);
		__disable_icache();

		for (i = start; i < end; i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}

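/*
 * Per-page and user-range invalidation fall back conservatively to a
 * full icache invalidate.
 */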
void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_icache_all();
}

void __invalidate_icache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_icache_all();
}

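/*
 * The signal return trampoline is two 32-bit instructions, hence the
 * fixed 8-byte window.
 */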
void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}

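/*
 * The MicroBlaze data cache in this configuration is write-through,
 * so invalidation without a writeback/flush step is sufficient.
 */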
void __invalidate_dcache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_dcache) {
		local_irq_save(flags);
		__disable_dcache();

		/*
		 * Just loop through cache size and invalidate;
		 * no need to add the CACHE_BASE address.
		 */
		for (i = 0; i < cpuinfo.dcache_size;
				i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}

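/* Same range-clamping scheme as __invalidate_icache_range() above. */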
void __invalidate_dcache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned long flags;
	unsigned int align;

	if (cpuinfo.use_dcache) {
		/*
		 * No need to cover the entire cache range,
		 * just cover the cache footprint.
		 */
		end = min(start + cpuinfo.dcache_size, end);
		align = ~(cpuinfo.dcache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.dcache_line);

		local_irq_save(flags);
		__disable_dcache();

		for (i = start; i < end; i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}

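/*
 * As with the icache, per-page and user-range dcache invalidation fall
 * back to invalidating the whole cache.
 */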
void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_dcache_all();
}

void __invalidate_dcache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_dcache_all();
}