author		Michal Simek <monstr@monstr.eu>		2009-03-27 09:25:16 -0400
committer	Michal Simek <monstr@monstr.eu>		2009-03-27 09:25:16 -0400
commit		8beb8503bfa305cd7d9efa590517a9c01e2f97b4 (patch)
tree		910b5b96454482b4ad8540c528456a0a53291c0b /arch/microblaze/kernel/cpu
parent		37069abf2973f51aa12aa9dcb86c7403c9828161 (diff)
microblaze_v8: cache support

Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Stephen Neuendorffer <stephen.neuendorffer@xilinx.com>
Acked-by: John Linn <john.linn@xilinx.com>
Acked-by: John Williams <john.williams@petalogix.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/kernel/cpu')
-rw-r--r--	arch/microblaze/kernel/cpu/cache.c	258
1 file changed, 258 insertions(+), 0 deletions(-)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
new file mode 100644
index 000000000000..be9fecca4f91
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -0,0 +1,258 @@
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/cache.h>
#include <linux/kernel.h>	/* min() */
#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */
#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

/* Exported functions */

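/*
 * Enable the instruction cache by setting MSR_ICE in the Machine Status
 * Register.  When the MSR instructions are configured in, a single msrset
 * does the job atomically; otherwise fall back to a read-modify-write of
 * rmsr through a scratch register (r12, hence the clobber).  The nops
 * cover the pipeline delay after touching rmsr.
 */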
void _enable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("	\
				msrset	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory");
#else
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory", "r12");
#endif
	}
}

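/*
 * Disable the instruction cache: clear MSR_ICE, either with msrclr or by
 * masking the bit out of rmsr (andi with the complemented constant).
 */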
void _disable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("	\
				msrclr	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory");
#else
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_ICE)	\
				: "memory", "r12");
#endif
	}
}

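/*
 * Invalidate the single icache line holding 'addr' with wic (write to
 * instruction cache).  The line is simply dropped; the icache holds
 * nothing dirty, so there is nothing to write back.
 */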
void _invalidate_icache(unsigned int addr)
{
	if (cpuinfo.use_icache) {
		__asm__ __volatile__ ("	\
				wic	%0, r0"	\
				:	\
				: "r" (addr));
	}
}

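/*
 * The data-cache controls below mirror their icache counterparts exactly,
 * only operating on MSR_DCE instead of MSR_ICE.
 */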
void _enable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("	\
				msrset	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory");
#else
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory", "r12");
#endif
	}
}

void _disable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("	\
				msrclr	r0, %0;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory");
#else
		__asm__ __volatile__ ("	\
				mfs	r12, rmsr;	\
				nop;	\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "	\
				:	\
				: "i" (MSR_DCE)	\
				: "memory", "r12");
#endif
	}
}

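/*
 * Invalidate the dcache line holding 'addr' with wdc (write to data
 * cache).  Note this discards the line without flushing it, which is
 * only safe on the write-through dcache configuration this code
 * appears to assume.
 */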
void _invalidate_dcache(unsigned int addr)
{
	if (cpuinfo.use_dcache)
		__asm__ __volatile__ ("	\
				wdc	%0, r0"	\
				:	\
				: "r" (addr));
}

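/*
 * Invalidate the entire instruction cache.  Interrupts are disabled and
 * the cache itself is switched off while the loop runs, so no line can be
 * refilled behind our back.  The loop walks icache_size bytes in
 * icache_line steps; wic indexes by address, so plain offsets from 0 are
 * enough and no CACHE_BASE needs to be added.  (The double-underscore
 * helpers used here presumably map onto the single-underscore functions
 * above via asm/cacheflush.h.)
 */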
void __invalidate_icache_all(void)
{
	unsigned int i;
	unsigned long flags;	/* local_irq_save() wants an unsigned long */

	if (cpuinfo.use_icache) {
		local_irq_save(flags);
		__disable_icache();

		/*
		 * Just loop through cache size and invalidate; no need to
		 * add the CACHE_BASE address
		 */
		for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}

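/*
 * Invalidate only the icache lines backing [start, end).  Since the cache
 * can hold at most icache_size bytes of any range, 'end' is first clamped
 * to start + icache_size; 'start' is then rounded down to a line boundary
 * and 'end' rounded up, so partial lines at both edges are covered.
 */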
void __invalidate_icache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned long flags;
	unsigned int align;

	if (cpuinfo.use_icache) {
		/*
		 * No need to cover entire cache range,
		 * just cover cache footprint
		 */
		end = min(start + cpuinfo.icache_size, end);
		align = ~(cpuinfo.icache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.icache_line);

		local_irq_save(flags);
		__disable_icache();

		for (i = start; i < end; i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}

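/*
 * The page- and user-range-grained icache hooks below cannot yet pick out
 * the individual lines backing a page, so they conservatively fall back to
 * invalidating the whole icache.
 */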
void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_icache_all();
}

void __invalidate_icache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_icache_all();
}

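/*
 * A signal trampoline is written as a pair of 32-bit instructions on the
 * user stack, hence (presumably) the fixed 8-byte window invalidated here
 * so the CPU fetches the freshly written code rather than stale icache
 * contents.
 */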
void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}

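/*
 * Whole-dcache invalidate; same disable/walk/re-enable pattern as the
 * icache variant, indexed from 0 in dcache_line steps.
 */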
void __invalidate_dcache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_dcache) {
		local_irq_save(flags);
		__disable_dcache();

		/*
		 * Just loop through cache size and invalidate;
		 * no need to add the CACHE_BASE address
		 */
		for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}

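/*
 * Ranged dcache invalidate with the same clamp-and-align treatment as
 * __invalidate_icache_range() above.
 */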
void __invalidate_dcache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned long flags;
	unsigned int align;

	if (cpuinfo.use_dcache) {
		/*
		 * No need to cover entire cache range,
		 * just cover cache footprint
		 */
		end = min(start + cpuinfo.dcache_size, end);
		align = ~(cpuinfo.dcache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.dcache_line);

		local_irq_save(flags);
		__disable_dcache();

		for (i = start; i < end; i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}

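/*
 * As on the instruction side, per-page and user-range dcache invalidation
 * simply falls back to dropping the whole cache.
 */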
void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_dcache_all();
}

void __invalidate_dcache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_dcache_all();
}