author		Michal Simek <monstr@monstr.eu>	2009-12-10 05:43:57 -0500
committer	Michal Simek <monstr@monstr.eu>	2009-12-14 02:45:10 -0500
commit		2ee2ff875a4d3bdb941e2bb1173cd927c09d5a67 (patch)
tree		a1ec4db3055527a2814cbdb006652dbf0885b348
parent		c8983a5c6ecc5ca68a871c44bc35f714663a4dfa (diff)
microblaze: Support for WB cache
Microblaze version 7.20.d is the first MB version whose WB cache can be used on MMU Linux. Please do not use previous versions, because they contain a HW bug. Adding WB support made it necessary to redesign the whole cache handling. Microblaze versions from 7.20.a on don't need to disable IRQs and caches before working with them, which is why there are special structures for that case.

Signed-off-by: Michal Simek <monstr@monstr.eu>
-rw-r--r--	arch/microblaze/include/asm/cacheflush.h	140
-rw-r--r--	arch/microblaze/kernel/cpu/cache.c		663
-rw-r--r--	arch/microblaze/kernel/setup.c			9
-rw-r--r--	arch/microblaze/kernel/signal.c			30
4 files changed, 574 insertions(+), 268 deletions(-)
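
The heart of the patch is the new struct scache in cacheflush.h: a table of function pointers that microblaze_cache_init() points at whichever implementation set matches the detected hardware (write-back vs. write-through dcache, MSR instructions available or not, IRQ-disable needed or not), so the cacheflush macros never branch at runtime. The following is a minimal user-space sketch of that dispatch pattern; the stub bodies, printouts and cpu_has_* flags are stand-ins for illustration, not kernel code:

#include <stdio.h>

/* Function-pointer table mirroring the patch's struct scache
 * (ie = icache enable, dflr = dcache flush range, ...) */
struct scache {
	void (*ie)(void);
	void (*dflr)(unsigned long start, unsigned long end);
};

/* Stand-in implementations; the patch builds these from wic/wdc
 * inline assembly in msr/nomsr and wb/wt variants. */
static void icache_enable_msr(void)
{
	puts("enable icache via msrset");
}

static void dcache_flush_range_wb(unsigned long start, unsigned long end)
{
	printf("wdc.flush over 0x%lx-0x%lx\n", start, end);
}

static const struct scache wb_msr = {
	.ie = icache_enable_msr,
	.dflr = dcache_flush_range_wb,
};

/* One global selected once at boot, as microblaze_cache_init() does */
static const struct scache *mbc;

#define enable_icache()			mbc->ie()
#define flush_dcache_range(start, end)	mbc->dflr(start, end)

int main(void)
{
	int cpu_has_msr = 1, cpu_has_wb = 1;	/* from PVR/cpuinfo in the patch */

	if (cpu_has_msr && cpu_has_wb)
		mbc = &wb_msr;	/* one branch of the real selection tree */

	enable_icache();
	flush_dcache_range(0x1000, 0x2000);
	return 0;
}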
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 1f04b911145..a6edd356cd0 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -18,6 +18,8 @@
 /* Somebody depends on this; sigh... */
 #include <linux/mm.h>
 
+/* Look at Documentation/cachetlb.txt */
+
 /*
  * Cache handling functions.
  * Microblaze has a write-through data cache, meaning that the data cache
@@ -27,95 +29,81 @@
  * instruction cache to make sure we don't fetch old, bad code.
  */
 
+/* struct cache, d=dcache, i=icache, fl = flush, iv = invalidate,
+ * suffix r = range */
+struct scache {
+	/* icache */
+	void (*ie)(void); /* enable */
+	void (*id)(void); /* disable */
+	void (*ifl)(void); /* flush */
+	void (*iflr)(unsigned long a, unsigned long b);
+	void (*iin)(void); /* invalidate */
+	void (*iinr)(unsigned long a, unsigned long b);
+	/* dcache */
+	void (*de)(void); /* enable */
+	void (*dd)(void); /* disable */
+	void (*dfl)(void); /* flush */
+	void (*dflr)(unsigned long a, unsigned long b);
+	void (*din)(void); /* invalidate */
+	void (*dinr)(unsigned long a, unsigned long b);
+};
+
+/* microblaze cache */
+extern struct scache *mbc;
+
+void microblaze_cache_init(void);
+
+#define enable_icache() mbc->ie();
+#define disable_icache() mbc->id();
+#define flush_icache() mbc->ifl();
+#define flush_icache_range(start, end) mbc->iflr(start, end);
+#define invalidate_icache() mbc->iin();
+#define invalidate_icache_range(start, end) mbc->iinr(start, end);
+
+
+#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
+#define flush_icache_page(vma, pg) do { } while (0)
+
+#define enable_dcache() mbc->de();
+#define disable_dcache() mbc->dd();
 /* FIXME for LL-temac driver */
-#define invalidate_dcache_range(start, end) \
-			__invalidate_dcache_range(start, end)
-
-#define flush_cache_all() __invalidate_cache_all()
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) __invalidate_cache_all()
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define invalidate_dcache() mbc->din();
+#define invalidate_dcache_range(start, end) mbc->dinr(start, end);
+#define flush_dcache() mbc->dfl();
+#define flush_dcache_range(start, end) mbc->dflr(start, end);
 
-#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end)
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+/* D-cache aliasing problem can't happen - cache is between MMU and ram */
 #define flush_dcache_page(page) do { } while (0)
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
 
-#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
-#define flush_icache_page(vma, pg) do { } while (0)
-
-#ifndef CONFIG_MMU
-# define flush_icache_user_range(start, len) do { } while (0)
-#else
-# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
-
-# define flush_page_to_ram(page) do { } while (0)
 
-# define flush_icache() __invalidate_icache_all()
-# define flush_cache_sigtramp(vaddr) \
-			__invalidate_icache_range(vaddr, vaddr + 8)
-
-# define flush_dcache_mmap_lock(mapping) do { } while (0)
-# define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 
-# define flush_cache_dup_mm(mm) do { } while (0)
+/* MS: kgdb code use this macro, wrong len with FLASH */
+#if 0
+#define flush_cache_range(vma, start, len) {	\
+	flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
+	flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
+}
 #endif
 
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-
-void _enable_icache(void);
-void _disable_icache(void);
-void _invalidate_icache(unsigned int addr);
-
-#define __enable_icache() _enable_icache()
-#define __disable_icache() _disable_icache()
-#define __invalidate_icache(addr) _invalidate_icache(addr)
-
-void _enable_dcache(void);
-void _disable_dcache(void);
-void _invalidate_dcache(unsigned int addr);
-
-#define __enable_dcache() _enable_dcache()
-#define __disable_dcache() _disable_dcache()
-#define __invalidate_dcache(addr) _invalidate_dcache(addr)
-
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-/* see arch/microblaze/kernel/cache.c */
-extern void __invalidate_icache_all(void);
-extern void __invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __invalidate_icache_page(struct vm_area_struct *vma,
-					struct page *page);
-extern void __invalidate_icache_user_range(struct vm_area_struct *vma,
-					struct page *page,
-					unsigned long adr, int len);
-extern void __invalidate_cache_sigtramp(unsigned long addr);
-
-extern void __invalidate_dcache_all(void);
-extern void __invalidate_dcache_range(unsigned long start, unsigned long end);
-extern void __invalidate_dcache_page(struct vm_area_struct *vma,
-					struct page *page);
-extern void __invalidate_dcache_user_range(struct vm_area_struct *vma,
-					struct page *page,
-					unsigned long adr, int len);
-
-extern inline void __invalidate_cache_all(void)
-{
-	__invalidate_icache_all();
-	__invalidate_dcache_all();
-}
+#define flush_cache_range(vma, start, len) do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
-do { memcpy((dst), (src), (len)); \
+do {								\
+	memcpy((dst), (src), (len));				\
 	flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
 } while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
-	memcpy((dst), (src), (len))
+do {								\
+	memcpy((dst), (src), (len));				\
+} while (0)
 
 #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 538f1df6761..d9d63831cc2 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file COPYING in the main directory of this
@@ -13,243 +13,534 @@
 #include <asm/cacheflush.h>
 #include <linux/cache.h>
 #include <asm/cpuinfo.h>
+#include <asm/pvr.h>
 
-/* Exported functions */
+static inline void __invalidate_flush_icache(unsigned int addr)
+{
+	__asm__ __volatile__ ("wic	%0, r0;"	\
+					: : "r" (addr));
+}
+
+static inline void __flush_dcache(unsigned int addr)
+{
+	__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
+					: : "r" (addr));
+}
+
+static inline void __invalidate_dcache(unsigned int baseaddr,
+						unsigned int offset)
+{
+	__asm__ __volatile__ ("wdc.clear	%0, %1;"	\
+					: : "r" (baseaddr), "r" (offset));
+}
 
-void _enable_icache(void)
+static inline void __enable_icache_msr(void)
 {
-	if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-	__asm__ __volatile__ ("			\
-			msrset	r0, %0;		\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
+	__asm__ __volatile__ ("	msrset	r0, %0;		\
+				nop; "			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+	__asm__ __volatile__ ("	msrclr	r0, %0;		\
+				nop; "			\
+			: : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+	__asm__ __volatile__ ("	msrset	r0, %0;		\
+				nop; "			\
+			:				\
+			: "i" (MSR_DCE)			\
 			: "memory");
-#else
-	__asm__ __volatile__ ("			\
-			mfs	r12, rmsr;	\
-			nop;			\
-			ori	r12, r12, %0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
-			: "memory", "r12");
-#endif
-	}
 }
 
-void _disable_icache(void)
+static inline void __disable_dcache_msr(void)
 {
-	if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-	__asm__ __volatile__ ("			\
-			msrclr r0, %0;		\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
+	__asm__ __volatile__ ("	msrclr	r0, %0;		\
+				nop; "			\
+			:				\
+			: "i" (MSR_DCE)			\
 			: "memory");
-#else
-	__asm__ __volatile__ ("			\
-			mfs	r12, rmsr;	\
-			nop;			\
-			andi	r12, r12, ~%0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_ICE)		\
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				ori	r12, r12, %0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:				\
+			: "i" (MSR_ICE)			\
 			: "memory", "r12");
-#endif
-	}
 }
 
-void _invalidate_icache(unsigned int addr)
+static inline void __disable_icache_nomsr(void)
 {
-	if (cpuinfo.use_icache) {
-		__asm__ __volatile__ ("		\
-				wic	%0, r0"	\
-				:		\
-				: "r" (addr));
-	}
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				andi	r12, r12, ~%0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:				\
+			: "i" (MSR_ICE)			\
+			: "memory", "r12");
 }
 
-void _enable_dcache(void)
+static inline void __enable_dcache_nomsr(void)
 {
-	if (cpuinfo.use_dcache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-	__asm__ __volatile__ ("			\
-			msrset	r0, %0;		\
-			nop; "			\
-			:			\
-			: "i" (MSR_DCE)		\
-			: "memory");
-#else
-	__asm__ __volatile__ ("			\
-			mfs	r12, rmsr;	\
-			nop;			\
-			ori	r12, r12, %0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_DCE)		\
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				ori	r12, r12, %0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:				\
+			: "i" (MSR_DCE)			\
 			: "memory", "r12");
-#endif
-	}
 }
 
-void _disable_dcache(void)
+static inline void __disable_dcache_nomsr(void)
 {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-	__asm__ __volatile__ ("			\
-			msrclr	r0, %0;		\
-			nop; "			\
-			:			\
-			: "i" (MSR_DCE)		\
-			: "memory");
-#else
-	__asm__ __volatile__ ("			\
-			mfs	r12, rmsr;	\
-			nop;			\
-			andi	r12, r12, ~%0;	\
-			mts	rmsr, r12;	\
-			nop; "			\
-			:			\
-			: "i" (MSR_DCE)		\
+	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
+				nop;			\
+				andi	r12, r12, ~%0;	\
+				mts	rmsr, r12;	\
+				nop; "			\
+			:				\
+			: "i" (MSR_DCE)			\
 			: "memory", "r12");
-#endif
 }
 
-void _invalidate_dcache(unsigned int addr)
+
+/* Helper macro for computing the limits of cache range loops */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
+do {									\
+	int align = ~(cache_line_length - 1);				\
+	end = min(start + cache_size, end);				\
+	start &= align;							\
+	end = ((end & align) + cache_line_length);			\
+} while (0);
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on that cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
+do {									\
+	unsigned int len = cache_size;					\
+	int step = -line_length;					\
+	BUG_ON(step >= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, r0;			\
+					bgtid	%0, 1b;			\
+					addk	%0, %0, %1;		\
+					" : : "r" (len), "r" (step)	\
+					: "memory");			\
+} while (0);
+
+
+#define CACHE_ALL_LOOP2(cache_size, line_length, op)			\
+do {									\
+	unsigned int len = cache_size;					\
+	int step = -line_length;					\
+	BUG_ON(step >= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	r0, %0;			\
+					bgtid	%0, 1b;			\
+					addk	%0, %0, %1;		\
+					" : : "r" (len), "r" (step)	\
+					: "memory");			\
+} while (0);
+
+/* for wdc.flush/clear */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
+do {									\
+	int step = -line_length;					\
+	int count = end - start;					\
+	BUG_ON(count <= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
+					bgtid	%1, 1b;			\
+					addk	%1, %1, %2;		\
+					" : : "r" (start), "r" (count),	\
+					"r" (step) : "memory");		\
+} while (0);
+
+/* It is used only first parameter for OP - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
+do {									\
+	int step = -line_length;					\
+	int count = end - start;					\
+	BUG_ON(count <= 0);						\
+									\
+	__asm__ __volatile__ (" 1:	addk	%0, %0, %1;		\
+					" #op "	%0, r0;			\
+					bgtid	%1, 1b;			\
+					addk	%1, %1, %2;		\
+					" : : "r" (start), "r" (count),	\
+					"r" (step) : "memory");		\
+} while (0);
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 {
-	__asm__ __volatile__ ("			\
-			wdc	%0, r0"		\
-			:			\
-			: "r" (addr));
+	unsigned long flags;
+
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_msr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_icache_all(void)
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+						unsigned long end)
 {
-	unsigned int i;
-	unsigned flags;
+	unsigned long flags;
 
-	if (cpuinfo.use_icache) {
-		local_irq_save(flags);
-		__disable_icache();
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
 
-		/* Just loop through cache size and invalidate, no need to add
-		CACHE_BASE address */
-		for (i = 0; i < cpuinfo.icache_size;
-			i += cpuinfo.icache_line_length)
-				__invalidate_icache(i);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
 
-		__enable_icache();
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_icache_range(unsigned long start, unsigned long end)
+static void __flush_icache_range_noirq(unsigned long start,
+						unsigned long end)
 {
-	unsigned int i;
-	unsigned flags;
-	unsigned int align;
-
-	if (cpuinfo.use_icache) {
-		/*
-		 * No need to cover entire cache range,
-		 * just cover cache footprint
-		 */
-		end = min(start + cpuinfo.icache_size, end);
-		align = ~(cpuinfo.icache_line_length - 1);
-		start &= align;	/* Make sure we are aligned */
-		/* Push end up to the next cache line */
-		end = ((end & align) + cpuinfo.icache_line_length);
-
-		local_irq_save(flags);
-		__disable_icache();
-
-		for (i = start; i < end; i += cpuinfo.icache_line_length)
-			__invalidate_icache(i);
-
-		__enable_icache();
-		local_irq_restore(flags);
-	}
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.icache_line_length, cpuinfo.icache_size);
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_msr();
+
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_msr();
+	local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_icache_nomsr();
+
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+	__enable_icache_nomsr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+static void __flush_icache_all_noirq(void)
 {
-	__invalidate_icache_all();
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
 }
 
-void __invalidate_icache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long adr,
-		int len)
+static void __invalidate_dcache_all_msr_irq(void)
 {
-	__invalidate_icache_all();
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_msr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_cache_sigtramp(unsigned long addr)
+static void __invalidate_dcache_all_nomsr_irq(void)
 {
-	__invalidate_icache_range(addr, addr + 8);
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
 }
 
-void __invalidate_dcache_all(void)
+static void __invalidate_dcache_all_noirq_wt(void)
 {
-	unsigned int i;
-	unsigned flags;
-
-	if (cpuinfo.use_dcache) {
-		local_irq_save(flags);
-		__disable_dcache();
-
-		/*
-		 * Just loop through cache size and invalidate,
-		 * no need to add CACHE_BASE address
-		 */
-		for (i = 0; i < cpuinfo.dcache_size;
-			i += cpuinfo.dcache_line_length)
-				__invalidate_dcache(i);
-
-		__enable_dcache();
-		local_irq_restore(flags);
-	}
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
 }
 
-void __invalidate_dcache_range(unsigned long start, unsigned long end)
+/* FIXME this is weird - should be only wdc but not work
+ * MS: I am getting bus errors and other weird things */
+static void __invalidate_dcache_all_wb(void)
 {
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+							wdc.clear)
+
+#if 0
 	unsigned int i;
-	unsigned flags;
-	unsigned int align;
-
-	if (cpuinfo.use_dcache) {
-		/*
-		 * No need to cover entire cache range,
-		 * just cover cache footprint
-		 */
-		end = min(start + cpuinfo.dcache_size, end);
-		align = ~(cpuinfo.dcache_line_length - 1);
-		start &= align;	/* Make sure we are aligned */
-		/* Push end up to the next cache line */
-		end = ((end & align) + cpuinfo.dcache_line_length);
-		local_irq_save(flags);
-		__disable_dcache();
-
-		for (i = start; i < end; i += cpuinfo.dcache_line_length)
-			__invalidate_dcache(i);
-
-		__enable_dcache();
-		local_irq_restore(flags);
-	}
+
+	pr_debug("%s\n", __func__);
+
+	/* Just loop through cache size and invalidate it */
+	for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
+		__invalidate_dcache(0, i);
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+						unsigned long end)
+{
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+						unsigned long end)
+{
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
 }
 
-void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+						unsigned long end)
 {
-	__invalidate_dcache_all();
+	unsigned long flags;
+
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	local_irq_save(flags);
+	__disable_dcache_msr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_msr();
+	local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+						unsigned long end)
+{
+	unsigned long flags;
+
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+	local_irq_save(flags);
+	__disable_dcache_nomsr();
+
+	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+	__enable_dcache_nomsr();
+	local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+	pr_debug("%s\n", __func__);
+	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+				wdc.flush);
 }
 
-void __invalidate_dcache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long adr,
-		int len)
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
 {
-	__invalidate_dcache_all();
+	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+				(unsigned int)start, (unsigned int) end);
+
+	CACHE_LOOP_LIMITS(start, end,
+			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+}
+
+/* struct for wb caches and for wt caches */
+struct scache *mbc;
+
+/* new wb cache model */
+const struct scache wb_msr = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __flush_dcache_all_wb,
+	.dflr = __flush_dcache_range_wb,
+	.din = __invalidate_dcache_all_wb,
+	.dinr = __invalidate_dcache_range_wb,
+};
+
+/* There is only difference in ie, id, de, dd functions */
+const struct scache wb_nomsr = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __flush_dcache_all_wb,
+	.dflr = __flush_dcache_range_wb,
+	.din = __invalidate_dcache_all_wb,
+	.dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old wt cache model with disabling irq and turn off cache */
+const struct scache wt_msr = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_msr_irq,
+	.iflr = __flush_icache_range_msr_irq,
+	.iin = __flush_icache_all_msr_irq,
+	.iinr = __flush_icache_range_msr_irq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __invalidate_dcache_all_msr_irq,
+	.dflr = __invalidate_dcache_range_msr_irq_wt,
+	.din = __invalidate_dcache_all_msr_irq,
+	.dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+const struct scache wt_nomsr = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_nomsr_irq,
+	.iflr = __flush_icache_range_nomsr_irq,
+	.iin = __flush_icache_all_nomsr_irq,
+	.iinr = __flush_icache_range_nomsr_irq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __invalidate_dcache_all_nomsr_irq,
+	.dflr = __invalidate_dcache_range_nomsr_irq,
+	.din = __invalidate_dcache_all_nomsr_irq,
+	.dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt cache model for newer Microblaze versions */
+const struct scache wt_msr_noirq = {
+	.ie = __enable_icache_msr,
+	.id = __disable_icache_msr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_msr,
+	.dd = __disable_dcache_msr,
+	.dfl = __invalidate_dcache_all_noirq_wt,
+	.dflr = __invalidate_dcache_range_nomsr_wt,
+	.din = __invalidate_dcache_all_noirq_wt,
+	.dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+const struct scache wt_nomsr_noirq = {
+	.ie = __enable_icache_nomsr,
+	.id = __disable_icache_nomsr,
+	.ifl = __flush_icache_all_noirq,
+	.iflr = __flush_icache_range_noirq,
+	.iin = __flush_icache_all_noirq,
+	.iinr = __flush_icache_range_noirq,
+	.de = __enable_dcache_nomsr,
+	.dd = __disable_dcache_nomsr,
+	.dfl = __invalidate_dcache_all_noirq_wt,
+	.dflr = __invalidate_dcache_range_nomsr_wt,
+	.din = __invalidate_dcache_all_noirq_wt,
+	.dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A	0x0c
+#define CPUVER_7_20_D	0x0f
+
+#define INFO(s)	printk(KERN_INFO "cache: " s " \n");
+
+void microblaze_cache_init(void)
+{
+	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+		if (cpuinfo.dcache_wb) {
+			INFO("wb_msr");
+			mbc = (struct scache *)&wb_msr;
+			if (cpuinfo.ver_code < CPUVER_7_20_D) {
+				/* MS: problem with signal handling - hw bug */
+				INFO("WB won't work properly");
+			}
+		} else {
+			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+				INFO("wt_msr_noirq");
+				mbc = (struct scache *)&wt_msr_noirq;
+			} else {
+				INFO("wt_msr");
+				mbc = (struct scache *)&wt_msr;
+			}
+		}
+	} else {
+		if (cpuinfo.dcache_wb) {
+			INFO("wb_nomsr");
+			mbc = (struct scache *)&wb_nomsr;
+			if (cpuinfo.ver_code < CPUVER_7_20_D) {
+				/* MS: problem with signal handling - hw bug */
+				INFO("WB won't work properly");
+			}
+		} else {
+			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+				INFO("wt_nomsr_noirq");
+				mbc = (struct scache *)&wt_nomsr_noirq;
+			} else {
+				INFO("wt_nomsr");
+				mbc = (struct scache *)&wt_nomsr;
+			}
+		}
+	}
 }
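
Every range operation above first runs CACHE_LOOP_LIMITS, which clamps the range to at most one cache size and aligns both ends to line boundaries before the descending assembly loop. Below is a small user-space model of just that arithmetic, assuming a power-of-two line length; the function name and the sample numbers are illustrative, only the alignment logic mirrors the macro:

#include <assert.h>
#include <stdio.h>

/* Model of CACHE_LOOP_LIMITS: clamp the range to one cache size,
 * align start down and push end up to the next line boundary. */
static void cache_loop_limits(unsigned long *start, unsigned long *end,
			      unsigned long line_length, unsigned long size)
{
	unsigned long align = ~(line_length - 1);

	if (*end > *start + size)	/* no point covering more than the cache */
		*end = *start + size;
	*start &= align;			/* align start down */
	*end = (*end & align) + line_length;	/* push end up to the next line */
}

int main(void)
{
	unsigned long start = 0x1005, end = 0x1073;

	cache_loop_limits(&start, &end, 32, 8192);
	/* 0x1005 aligns down to 0x1000; 0x1073 pushes up to 0x1080 */
	assert(start == 0x1000 && end == 0x1080);
	printf("loop from 0x%lx to 0x%lx in 32-byte steps\n", start, end);
	return 0;
}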
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 1c3f18ba8af..5372b24ad04 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -52,11 +52,12 @@ void __init setup_arch(char **cmdline_p)
 	/* irq_early_init(); */
 	setup_cpuinfo();
 
-	__invalidate_icache_all();
-	__enable_icache();
+	microblaze_cache_init();
 
-	__invalidate_dcache_all();
-	__enable_dcache();
+	enable_dcache();
+
+	invalidate_icache();
+	enable_icache();
 
 	setup_memory();
 
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 0c96ac34c31..6de3db04b1a 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -176,6 +176,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	struct rt_sigframe __user *frame;
 	int err = 0;
 	int signal;
+	unsigned long address = 0;
+#ifdef CONFIG_MMU
+	pmd_t *pmdp;
+	pte_t *ptep;
+#endif
 
 	frame = get_sigframe(ka, regs, sizeof(*frame));
 
@@ -216,8 +221,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	   Negative 8 offset because return is rtsd r15, 8 */
 	regs->r15 = ((unsigned long)frame->tramp)-8;
 
-	__invalidate_cache_sigtramp((unsigned long)frame->tramp);
-
+	address = ((unsigned long)frame->tramp);
+#ifdef CONFIG_MMU
+	pmdp = pmd_offset(pud_offset(
+			pgd_offset(current->mm, address),
+					address), address);
+
+	preempt_disable();
+	ptep = pte_offset_map(pmdp, address);
+	if (pte_present(*ptep)) {
+		address = (unsigned long) page_address(pte_page(*ptep));
+		/* MS: I need add offset in page */
+		address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
+		/* MS address is virtual */
+		address = virt_to_phys(address);
+		invalidate_icache_range(address, address + 8);
+		flush_dcache_range(address, address + 8);
+	}
+	pte_unmap(ptep);
+	preempt_enable();
+#else
+	flush_icache_range(address, address + 8);
+	flush_dcache_range(address, address + 8);
+#endif
 	if (err)
 		goto give_sigsegv;
 
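
In the MMU case above, the trampoline lives at a user virtual address, so the patch resolves it through the page tables, re-applies the offset inside the page to the kernel mapping of that page, converts the result with virt_to_phys(), and flushes the 8-byte trampoline from both caches. A rough model of the offset arithmetic only (all addresses below are made up, and the page-table walk itself is not reproduced):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long tramp = 0x10003a2cUL;	/* user address of frame->tramp */
	unsigned long page_kva = 0xc0752000UL;	/* stand-in for page_address(pte_page(*ptep)) */

	/* offset of the trampoline within its page, re-added to the
	 * kernel mapping of that page - mirrors the hunk above */
	unsigned long address = page_kva + (tramp & ~PAGE_MASK);

	assert((tramp & ~PAGE_MASK) == 0xa2c);
	/* the patch then does address = virt_to_phys(address) and
	 * flushes [address, address + 8) in both caches */
	printf("flush 8 bytes at 0x%lx\n", address);
	return 0;
}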