author		Michal Simek <monstr@monstr.eu>		2009-12-10 05:43:57 -0500
committer	Michal Simek <monstr@monstr.eu>		2009-12-14 02:45:10 -0500
commit		2ee2ff875a4d3bdb941e2bb1173cd927c09d5a67 (patch)
tree		a1ec4db3055527a2814cbdb006652dbf0885b348 /arch/microblaze/kernel/cpu
parent		c8983a5c6ecc5ca68a871c44bc35f714663a4dfa (diff)
microblaze: Support for WB cache
Microblaze version 7.20.d is the first MB version that can run MMU Linux with a write-back cache. Please do not use previous versions because they contain a HW bug. Adding WB support made it necessary to redesign the whole cache handling. Microblaze versions from 7.20.a don't need to disable IRQs and the caches before working with them, which is why separate cache-operation structures are provided for them.

Signed-off-by: Michal Simek <monstr@monstr.eu>
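[Editor's note] The core of the redesign is run-time dispatch instead of compile-time #ifdefs: microblaze_cache_init() inspects the PVR data once and points the global mbc at one of six struct scache operation tables (wb/wt, msr/nomsr, irq/noirq variants), so callers never branch on the CPU variant themselves. Below is a minimal stand-alone sketch of that pattern, not the kernel code: the real struct scache is declared in the MicroBlaze headers rather than in this patch, and the two fields shown (ie, iflr) are just a subset of the twelve initialized further down.

	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's struct scache: one
	 * function pointer per cache operation. */
	struct scache {
		void (*ie)(void);				/* icache enable */
		void (*iflr)(unsigned long, unsigned long);	/* icache flush range */
	};

	static void enable_icache_msr(void)
	{
		puts("msrset r0, MSR_ICE");	/* would be inline asm on MB */
	}

	static void flush_icache_range_noirq(unsigned long start,
					     unsigned long end)
	{
		printf("wic loop over 0x%lx..0x%lx\n", start, end);
	}

	/* One operation table per CPU variant, cf. wt_msr_noirq below */
	static const struct scache demo_wt_msr_noirq = {
		.ie = enable_icache_msr,
		.iflr = flush_icache_range_noirq,
	};

	static const struct scache *mbc;	/* chosen once at boot */

	int main(void)
	{
		mbc = &demo_wt_msr_noirq;	/* cf. microblaze_cache_init() */
		mbc->ie();			/* callers are variant-agnostic */
		mbc->iflr(0x1000, 0x2000);
		return 0;
	}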
Diffstat (limited to 'arch/microblaze/kernel/cpu')
-rw-r--r--	arch/microblaze/kernel/cpu/cache.c | 663
1 file changed, 477 insertions(+), 186 deletions(-)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 538f1df6761..d9d63831cc2 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file COPYING in the main directory of this
@@ -13,243 +13,534 @@
 #include <asm/cacheflush.h>
 #include <linux/cache.h>
 #include <asm/cpuinfo.h>
+#include <asm/pvr.h>
 
-/* Exported functions */
+static inline void __invalidate_flush_icache(unsigned int addr)
+{
+	__asm__ __volatile__ ("wic	%0, r0;"	\
+			: : "r" (addr));
+}
+
+static inline void __flush_dcache(unsigned int addr)
+{
+	__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
+			: : "r" (addr));
+}
+
+static inline void __invalidate_dcache(unsigned int baseaddr,
+						unsigned int offset)
+{
+	__asm__ __volatile__ ("wdc.clear	%0, %1;"	\
+			: : "r" (baseaddr), "r" (offset));
+}
 
-void _enable_icache(void)
-{
-	if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-		__asm__ __volatile__ ("	\
-			msrset	r0, %0;		\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
+static inline void __enable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	msrset	r0, %0;		\
+				nop; "			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	msrclr r0, %0;		\
+				nop; "			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	msrset	r0, %0;		\
+				nop; "			\
+				:			\
+				: "i" (MSR_DCE)		\
 				: "memory");
-#else
-		__asm__ __volatile__ ("	\
-			mfs	r12, rmsr;	\
-			nop;			\
-			ori	r12, r12, %0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
-			: "memory", "r12");
-#endif
-	}
 }
 
-void _disable_icache(void)
-{
-	if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-		__asm__ __volatile__ ("	\
-			msrclr r0, %0;	\
-			nop; "		\
-			:		\
-			: "i" (MSR_ICE)	\
+static inline void __disable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	msrclr	r0, %0;		\
+				nop; "			\
+			:			\
+			: "i" (MSR_DCE)		\
 			: "memory");
-#else
-		__asm__ __volatile__ ("	\
-			mfs	r12, rmsr;	\
-			nop;			\
-			andi	r12, r12, ~%0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				ori	r12, r12, %0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:			\
+			: "i" (MSR_ICE)		\
 			: "memory", "r12");
-#endif
-	}
 }
 
-void _invalidate_icache(unsigned int addr)
-{
-	if (cpuinfo.use_icache) {
-		__asm__ __volatile__ ("	\
-			wic	%0, r0"	\
-			:		\
-			: "r" (addr));
-	}
+static inline void __disable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				andi	r12, r12, ~%0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:			\
+			: "i" (MSR_ICE)		\
+			: "memory", "r12");
 }
 
-void _enable_dcache(void)
-{
-	if (cpuinfo.use_dcache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-		__asm__ __volatile__ ("	\
-			msrset	r0, %0;	\
-			nop; "		\
-			:		\
-			: "i" (MSR_DCE)	\
-			: "memory");
-#else
-		__asm__ __volatile__ ("	\
-			mfs	r12, rmsr;	\
-			nop;			\
-			ori	r12, r12, %0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_DCE)		\
+static inline void __enable_dcache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				ori	r12, r12, %0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:			\
+			: "i" (MSR_DCE)		\
 			: "memory", "r12");
-#endif
-	}
 }
 
-void _disable_dcache(void)
-{
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-	__asm__ __volatile__ ("	\
-			msrclr	r0, %0;	\
-			nop; "		\
-			:		\
-			: "i" (MSR_DCE)	\
-			: "memory");
-#else
-	__asm__ __volatile__ ("	\
-			mfs	r12, rmsr;	\
-			nop;			\
-			andi	r12, r12, ~%0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_DCE)		\
+static inline void __disable_dcache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				andi	r12, r12, ~%0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:			\
+			: "i" (MSR_DCE)		\
 			: "memory", "r12");
-#endif
 }
 
-void _invalidate_dcache(unsigned int addr)
+
+/* Helper macro for computing the limits of cache range loops */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
+do {									\
+	int align = ~(cache_line_length - 1);				\
+	end = min(start + cache_size, end);				\
+	start &= align;							\
+	end = ((end & align) + cache_line_length);			\
+} while (0);
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on that cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
+do {									\
+	unsigned int len = cache_size;					\
+	int step = -line_length;					\
+	BUG_ON(step >= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, r0;			\
+					bgtid	%0, 1b;			\
+					addk	%0, %0, %1;		\
+					" : : "r" (len), "r" (step)	\
+					: "memory");			\
+} while (0);
+
+
+#define CACHE_ALL_LOOP2(cache_size, line_length, op)			\
+do {									\
+	unsigned int len = cache_size;					\
+	int step = -line_length;					\
+	BUG_ON(step >= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	r0, %0;			\
+					bgtid	%0, 1b;			\
+					addk	%0, %0, %1;		\
+					" : : "r" (len), "r" (step)	\
+					: "memory");			\
+} while (0);
+
+/* for wdc.flush/clear */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
+do {									\
+	int step = -line_length;					\
+	int count = end - start;					\
+	BUG_ON(count <= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
+					bgtid	%1, 1b;			\
+					addk	%1, %1, %2;		\
+					" : : "r" (start), "r" (count),	\
+					"r" (step) : "memory");		\
+} while (0);
+
+/* It is used only first parameter for OP - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
+do {									\
+	int step = -line_length;					\
+	int count = end - start;					\
+	BUG_ON(count <= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	addk	%0, %0, %1;		\
+					" #op "	%0, r0;			\
+					bgtid	%1, 1b;			\
+					addk	%1, %1, %2;		\
+					" : : "r" (start), "r" (count),	\
+					"r" (step) : "memory");		\
+} while (0);
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 {
-	__asm__ __volatile__ ("	\
-			wdc	%0, r0"	\
-			:		\
-			: "r" (addr));
+	unsigned long flags;
+
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_msr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_icache_all(void)
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+				unsigned long end)
 {
-	unsigned int i;
-	unsigned flags;
+	unsigned long flags;
 
-	if (cpuinfo.use_icache) {
-		local_irq_save(flags);
-		__disable_icache();
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
 
-		/* Just loop through cache size and invalidate, no need to add
-		CACHE_BASE address */
-		for (i = 0; i < cpuinfo.icache_size;
-			i += cpuinfo.icache_line_length)
-			__invalidate_icache(i);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
 
-		__enable_icache();
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_icache_range(unsigned long start, unsigned long end)
+static void __flush_icache_range_noirq(unsigned long start,
+				unsigned long end)
 {
-	unsigned int i;
-	unsigned flags;
-	unsigned int align;
-
-	if (cpuinfo.use_icache) {
-		/*
-		 * No need to cover entire cache range,
-		 * just cover cache footprint
-		 */
-		end = min(start + cpuinfo.icache_size, end);
-		align = ~(cpuinfo.icache_line_length - 1);
-		start &= align;	/* Make sure we are aligned */
-		/* Push end up to the next cache line */
-		end = ((end & align) + cpuinfo.icache_line_length);
-
-		local_irq_save(flags);
-		__disable_icache();
-
-		for (i = start; i < end; i += cpuinfo.icache_line_length)
-			__invalidate_icache(i);
-
-		__enable_icache();
-		local_irq_restore(flags);
-	}
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_msr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+static void __flush_icache_all_noirq(void)
 {
-	__invalidate_icache_all();
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
 }
 
-void __invalidate_icache_user_range(struct vm_area_struct *vma,
-				struct page *page, unsigned long adr,
-				int len)
+static void __invalidate_dcache_all_msr_irq(void)
 {
-	__invalidate_icache_all();
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_msr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_cache_sigtramp(unsigned long addr)
+static void __invalidate_dcache_all_nomsr_irq(void)
 {
-	__invalidate_icache_range(addr, addr + 8);
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_dcache_all(void)
+static void __invalidate_dcache_all_noirq_wt(void)
 {
-	unsigned int i;
-	unsigned flags;
-
-	if (cpuinfo.use_dcache) {
-		local_irq_save(flags);
-		__disable_dcache();
-
-		/*
-		 * Just loop through cache size and invalidate,
-		 * no need to add CACHE_BASE address
-		 */
-		for (i = 0; i < cpuinfo.dcache_size;
-			i += cpuinfo.dcache_line_length)
-			__invalidate_dcache(i);
-
-		__enable_dcache();
-		local_irq_restore(flags);
-	}
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
 }
 
-void __invalidate_dcache_range(unsigned long start, unsigned long end)
+/* FIXME this is weird - should be only wdc but not work
+ * MS: I am getting bus errors and other weird things */
+static void __invalidate_dcache_all_wb(void)
 {
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+					wdc.clear)
+
+#if 0
 	unsigned int i;
-	unsigned flags;
-	unsigned int align;
+
+	pr_debug("%s\n", __func__);
 
-	if (cpuinfo.use_dcache) {
-		/*
-		 * No need to cover entire cache range,
-		 * just cover cache footprint
-		 */
-		end = min(start + cpuinfo.dcache_size, end);
-		align = ~(cpuinfo.dcache_line_length - 1);
-		start &= align;	/* Make sure we are aligned */
-		/* Push end up to the next cache line */
-		end = ((end & align) + cpuinfo.dcache_line_length);
-		local_irq_save(flags);
-		__disable_dcache();
-
-		for (i = start; i < end; i += cpuinfo.dcache_line_length)
-			__invalidate_dcache(i);
-
-		__enable_dcache();
-		local_irq_restore(flags);
-	}
+	/* Just loop through cache size and invalidate it */
+	for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
+		__invalidate_dcache(0, i);
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+						unsigned long end)
+{
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+						unsigned long end)
+{
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
 }
 
-void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+						unsigned long end)
 {
-	__invalidate_dcache_all();
+	unsigned long flags;
+
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_msr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+						unsigned long end)
+{
+	unsigned long flags;
+
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+				wdc.flush);
 }
 
-void __invalidate_dcache_user_range(struct vm_area_struct *vma,
-				struct page *page, unsigned long adr,
-				int len)
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
 {
-	__invalidate_dcache_all();
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+}
+
+/* struct for wb caches and for wt caches */
+struct scache *mbc;
+
+/* new wb cache model */
+const struct scache wb_msr = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __flush_dcache_all_wb,
+	.dflr = __flush_dcache_range_wb,
+	.din = __invalidate_dcache_all_wb,
+	.dinr = __invalidate_dcache_range_wb,
+};
+
+/* There is only difference in ie, id, de, dd functions */
+const struct scache wb_nomsr = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __flush_dcache_all_wb,
+	.dflr = __flush_dcache_range_wb,
+	.din = __invalidate_dcache_all_wb,
+	.dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old wt cache model with disabling irq and turn off cache */
+const struct scache wt_msr = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_msr_irq,
+	.iflr = __flush_icache_range_msr_irq,
+	.iin = __flush_icache_all_msr_irq,
+	.iinr = __flush_icache_range_msr_irq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __invalidate_dcache_all_msr_irq,
+	.dflr = __invalidate_dcache_range_msr_irq_wt,
+	.din = __invalidate_dcache_all_msr_irq,
+	.dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+const struct scache wt_nomsr = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_nomsr_irq,
+	.iflr = __flush_icache_range_nomsr_irq,
+	.iin = __flush_icache_all_nomsr_irq,
+	.iinr = __flush_icache_range_nomsr_irq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __invalidate_dcache_all_nomsr_irq,
+	.dflr = __invalidate_dcache_range_nomsr_irq,
+	.din = __invalidate_dcache_all_nomsr_irq,
+	.dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt cache model for newer Microblaze versions */
+const struct scache wt_msr_noirq = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __invalidate_dcache_all_noirq_wt,
+	.dflr = __invalidate_dcache_range_nomsr_wt,
+	.din = __invalidate_dcache_all_noirq_wt,
+	.dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+const struct scache wt_nomsr_noirq = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __invalidate_dcache_all_noirq_wt,
+	.dflr = __invalidate_dcache_range_nomsr_wt,
+	.din = __invalidate_dcache_all_noirq_wt,
+	.dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A	0x0c
+#define CPUVER_7_20_D	0x0f
+
+#define INFO(s)	printk(KERN_INFO "cache: " s " \n");
+
+void microblaze_cache_init(void)
+{
+	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+		if (cpuinfo.dcache_wb) {
+			INFO("wb_msr");
+			mbc = (struct scache *)&wb_msr;
+			if (cpuinfo.ver_code < CPUVER_7_20_D) {
+				/* MS: problem with signal handling - hw bug */
+				INFO("WB won't work properly");
+			}
+		} else {
+			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+				INFO("wt_msr_noirq");
+				mbc = (struct scache *)&wt_msr_noirq;
+			} else {
+				INFO("wt_msr");
+				mbc = (struct scache *)&wt_msr;
+			}
+		}
+	} else {
+		if (cpuinfo.dcache_wb) {
+			INFO("wb_nomsr");
+			mbc = (struct scache *)&wb_nomsr;
+			if (cpuinfo.ver_code < CPUVER_7_20_D) {
+				/* MS: problem with signal handling - hw bug */
+				INFO("WB won't work properly");
+			}
+		} else {
+			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+				INFO("wt_nomsr_noirq");
+				mbc = (struct scache *)&wt_nomsr_noirq;
+			} else {
+				INFO("wt_nomsr");
+				mbc = (struct scache *)&wt_nomsr;
+			}
+		}
+	}
 }
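[Editor's note] As a sanity check on the new range handling, the arithmetic in CACHE_LOOP_LIMITS can be exercised on the host. The sketch below restates the macro's logic as a function (with the kernel's min() expanded, since that helper is not available here) and uses an assumed geometry of 32-byte lines and an 8 KB cache, so the numbers are illustrative only.

	#include <stdio.h>

	/* Host-side re-statement of CACHE_LOOP_LIMITS from the patch.
	 * Pure arithmetic - no MicroBlaze instructions involved. */
	static void cache_loop_limits(unsigned long *start, unsigned long *end,
				      unsigned long line_length,
				      unsigned long cache_size)
	{
		unsigned long align = ~(line_length - 1);

		if (*end > *start + cache_size)
			*end = *start + cache_size;	/* cap at one cache footprint */
		*start &= align;			/* round start down to a line */
		*end = (*end & align) + line_length;	/* round end up past last byte */
	}

	int main(void)
	{
		/* Assumed geometry: 32-byte lines, 8 KB cache */
		unsigned long start = 0x1005, end = 0x10f3;

		cache_loop_limits(&start, &end, 32, 8192);
		printf("start=0x%lx end=0x%lx\n", start, end);	/* 0x1000 0x1100 */
		return 0;
	}

Rounding start down and end up to line boundaries means the wic/wdc loops always operate on whole cache lines, and capping the range at start + cache_size keeps an oversized range from sweeping the same lines more than once, matching the "just cover cache footprint" comment in the code this patch removes.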