-rw-r--r--  arch/microblaze/kernel/cpu/cache.c  204
1 file changed, 161 insertions(+), 43 deletions(-)
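
The patch gives every cache-maintenance function two bodies: the existing assembly loop macros, now guarded by #ifdef ASM_LOOP, and a new plain C loop in the #else branch. The ASM_LOOP define added near the top of the file keeps the assembly path selected; removing that single define switches the whole file over to the C loops. Condensed shape of the change, assembled from the hunks below for orientation (not a literal line of the patch):

#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" : : "r" (i));
#endif
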
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 13f0c1de3234..f04d8a86dead 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
 #include <asm/cpuinfo.h>
 #include <asm/pvr.h>
 
-static inline void __invalidate_flush_icache(unsigned int addr)
-{
-	__asm__ __volatile__ ("wic %0, r0;" \
-			: : "r" (addr));
-}
-
-static inline void __flush_dcache(unsigned int addr)
-{
-	__asm__ __volatile__ ("wdc.flush %0, r0;" \
-			: : "r" (addr));
-}
-
-static inline void __invalidate_dcache(unsigned int baseaddr,
-				unsigned int offset)
-{
-	__asm__ __volatile__ ("wdc.clear %0, %1;" \
-			: : "r" (baseaddr), "r" (offset));
-}
-
 static inline void __enable_icache_msr(void)
 {
 	__asm__ __volatile__ ("	msrset	r0, %0;		\
@@ -148,9 +129,9 @@ do { \
 	int step = -line_length;					\
 	BUG_ON(step >= 0);						\
 									\
 	__asm__ __volatile__ (" 1:	" #op "	r0, %0;		\
 				bgtid	%0, 1b;			\
 				addk	%0, %0, %1;		\
 				" : : "r" (len), "r" (step)	\
 				: "memory");			\
 } while (0);
@@ -162,9 +143,9 @@ do { \
 	int count = end - start;					\
 	BUG_ON(count <= 0);						\
 									\
 	__asm__ __volatile__ (" 1:	" #op "	%0, %1;		\
 				bgtid	%1, 1b;			\
 				addk	%1, %1, %2;		\
 				" : : "r" (start), "r" (count),	\
 				"r" (step) : "memory");		\
 } while (0);
@@ -175,7 +156,7 @@ do { \
 	int volatile temp;						\
 	BUG_ON(end - start <= 0);					\
 									\
 	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
 				cmpu	%0, %1, %2;		\
 				bgtid	%0, 1b;			\
 				addk	%1, %1, %3;		\
@@ -183,10 +164,14 @@ do { \
183 "r" (line_length) : "memory"); \ 164 "r" (line_length) : "memory"); \
184} while (0); 165} while (0);
185 166
167#define ASM_LOOP
168
186static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 169static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
187{ 170{
188 unsigned long flags; 171 unsigned long flags;
189 172#ifndef ASM_LOOP
173 int i;
174#endif
190 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 175 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
191 (unsigned int)start, (unsigned int) end); 176 (unsigned int)start, (unsigned int) end);
192 177
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 	local_irq_save(flags);
 	__disable_icache_msr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_icache_msr();
 	local_irq_restore(flags);
 }
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 					unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 	local_irq_save(flags);
 	__disable_icache_nomsr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 
 	__enable_icache_nomsr();
 	local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 static void __flush_icache_range_noirq(unsigned long start,
 				unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __flush_icache_all_msr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_icache_msr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_icache_msr();
 	local_irq_restore(flags);
 }
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
 static void __flush_icache_all_nomsr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_icache_nomsr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_icache_nomsr();
 	local_irq_restore(flags);
 }
 
 static void __flush_icache_all_noirq(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_all_msr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_dcache_msr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_dcache_msr();
 	local_irq_restore(flags);
 }
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
 static void __invalidate_dcache_all_nomsr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_dcache_nomsr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 	__enable_dcache_nomsr();
 	local_irq_restore(flags);
 }
 
 static void __invalidate_dcache_all_noirq_wt(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 /* FIXME this is weird - should be only wdc but not work
  * MS: I am getting bus errors and other weird things */
 static void __invalidate_dcache_all_wb(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 					wdc.clear)
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.clear %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,
 				unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.clear %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
 				unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
 				unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 	CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
 	local_irq_save(flags);
 	__disable_dcache_msr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 
 	__enable_dcache_msr();
 	local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 					unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 	local_irq_save(flags);
 	__disable_dcache_nomsr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc %0, r0;" \
+				: : "r" (i));
+#endif
 
 	__enable_dcache_nomsr();
 	local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 
 static void __flush_dcache_all_wb(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 				wdc.flush);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.flush %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 			(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+	for (i = start; i < end; i += cpuinfo.dcache_line_length)
+		__asm__ __volatile__ ("wdc.flush %0, r0;" \
+				: : "r" (i));
+#endif
 }
 
 /* struct for wb caches and for wt caches */
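
The assembly macros and the new C branches must walk exactly the same set of cache lines: CACHE_ALL_LOOP starts at the last line (cache_size - line_length) and adds a negative step in the bgtid delay slot, while the C loops count upward from zero. The following host-side sketch checks that equivalence. It is not kernel code: cache_op(), CACHE_SIZE and LINE_LENGTH are hypothetical stand-ins for the MicroBlaze wic/wdc instruction and the cpuinfo fields, so the loop arithmetic can run and be asserted on any machine.

/* demo_cache_loop.c - build with: gcc -Wall demo_cache_loop.c && ./a.out */
#include <assert.h>
#include <stdio.h>

#define CACHE_SIZE	1024	/* stand-in for cpuinfo.icache_size */
#define LINE_LENGTH	16	/* stand-in for cpuinfo.icache_line_length */

static int touched[CACHE_SIZE / LINE_LENGTH];

/* models one "wic %0, r0" (or a wdc variant) on the line holding 'addr' */
static void cache_op(int addr)
{
	touched[addr / LINE_LENGTH]++;
}

/* Descending traversal, as CACHE_ALL_LOOP codes it in assembly: start at
 * the last line, add the negative step in the branch delay slot, and keep
 * looping while the pre-add counter is > 0, so line 0 is still processed
 * on the final pass. */
static void all_loop_asm_style(void)
{
	int len = CACHE_SIZE - LINE_LENGTH;
	int step = -LINE_LENGTH;

	do {
		cache_op(len);
		len += step;		/* addk in the bgtid delay slot */
	} while (len - step > 0);	/* bgtid tests the pre-add value */
}

/* Ascending traversal, as the #else (C) branches code it. */
static void all_loop_c_style(void)
{
	int i;

	for (i = 0; i < CACHE_SIZE; i += LINE_LENGTH)
		cache_op(i);
}

int main(void)
{
	int i;

	all_loop_asm_style();
	all_loop_c_style();
	/* both styles must hit every cache line exactly once each */
	for (i = 0; i < CACHE_SIZE / LINE_LENGTH; i++)
		assert(touched[i] == 2);
	printf("both loops touch all %d lines\n", CACHE_SIZE / LINE_LENGTH);
	return 0;
}

Under these assumptions the assertion holds for any cache size that is a multiple of the line length, which is the property the #else branches rely on when they stand in for the assembly macros; it also shows why the stride must come from the cache actually being operated on, since a mismatched line length would skip or revisit lines.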