Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r-- | arch/sh/mm/cache-sh4.c | 362
1 file changed, 362 insertions, 0 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
new file mode 100644
index 000000000000..ab833adf28c3
--- /dev/null
+++ b/arch/sh/mm/cache-sh4.c
@@ -0,0 +1,362 @@
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

extern void __flush_cache_4096_all(unsigned long start);
static void __flush_cache_4096_all_ex(unsigned long start);
extern void __flush_dcache_all(void);
static void __flush_dcache_all_ex(void);

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */

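/*
 * One P3 mapping slot per D-cache alias colour, each protected by its
 * own semaphore.  With 4KB pages and a 16KB operand cache there are
 * four possible colours, which is why four slots (and the four flush
 * positions used throughout this file) suffice.  The slots let the
 * user-page copy/clear paths set up a kernel mapping congruent with
 * the user mapping.
 */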
struct semaphore p3map_sem[4];

void __init p3_cache_init(void)
{
	if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
		panic("%s failed.", __FUNCTION__);

	sema_init(&p3map_sem[0], 1);
	sema_init(&p3map_sem[1], 1);
	sema_init(&p3map_sem[2], 1);
	sema_init(&p3map_sem[3], 1);
}

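/*
 * The three region primitives below map directly onto the SH-4 cache
 * block instructions: "ocbwb" writes a dirty line back and leaves it
 * valid, "ocbp" writes it back and invalidates it (purge), and "ocbi"
 * invalidates it without writing it back.
 */
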
/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbwb %0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the dirty D-cache lines and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbp %0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the D-cache lines for the region, with no write back.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
		& ~(L1_CACHE_BYTES - 1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbi %0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

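/*
 * Flush the entire D-cache through its memory-mapped address array:
 * writing 0 to an OC address-array entry clears its valid and dirty
 * bits (writing the line back first if it was dirty), so the loop
 * below visits every set of every way.
 */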
static void __flush_dcache_all_ex(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(cpu_data->dcache.sets << cpu_data->dcache.entry_shift) *
		cpu_data->dcache.ways;
	entry_offset = 1 << cpu_data->dcache.entry_shift;
	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr;
	     addr += entry_offset) {
		ctrl_outl(0, addr);
	}
}

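/*
 * Flush one 4KB alias region ('start' selects the colour) from every
 * way of the D-cache, again via the address array.
 */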
static void __flush_cache_4096_all_ex(unsigned long start)
{
	unsigned long addr, entry_offset;
	int i;

	entry_offset = 1 << cpu_data->dcache.entry_shift;
	for (i = 0; i < cpu_data->dcache.ways;
	     i++, start += cpu_data->dcache.way_incr) {
		for (addr = CACHE_OC_ADDRESS_ARRAY + start;
		     addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
		     addr += entry_offset) {
			ctrl_outl(0, addr);
		}
	}
}

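/*
 * Direct-mapped (single-way) parts can take the simpler out-of-line
 * fast path; multi-way parts fall back to the generic loop above.
 */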
void flush_cache_4096_all(unsigned long start)
{
	if (cpu_data->dcache.ways == 1)
		__flush_cache_4096_all(start);
	else
		__flush_cache_4096_all_ex(start);
}

/*
 * Write back the range of the D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline,
 * which happens to be the same behavior as flush_icache_range().
 * Since the trampoline fits in a single cache line, we simply flush
 * out that one line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES - 1);
	asm volatile("ocbwb %0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();
	for (i = 0; i < cpu_data->icache.ways;
	     i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */
	back_to_P1();
	local_irq_restore(flags);
}

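/*
 * Flush one 4KB colour region for a given physical page.  The third
 * argument to __flush_cache_4096 is an execution offset: 0x20000000 is
 * the distance from P1 to its uncached P2 mirror, so passing it makes
 * the flush loop run uncached on parts with the P2 flush bug (and for
 * I-cache address-array flushes, which sort below
 * CACHE_OC_ADDRESS_ARRAY); passing 0 leaves it running in P1.
 */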
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags;
	extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
				       unsigned long exec_offset);

	/*
	 * SH7751, SH7751R, and ST40 have no restrictions on where cache
	 * operations may be performed.  (The SH7750 must do them from the
	 * P2 area.)
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
	    || start < CACHE_OC_ADDRESS_ARRAY) {
		local_irq_save(flags);
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0x20000000);
		local_irq_restore(flags);
	} else {
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0);
	}
}

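/*
 * PG_mapped is an SH-specific page flag, set elsewhere in the SH mm
 * code once a page has been mapped into user space; pages that were
 * never user-mapped cannot have stale user-colour lines, so
 * flush_dcache_page() may skip them.
 */
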
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));

		/* Flush the page at each of the four alias positions */
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
	}
}

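/*
 * Invalidating the whole I-cache is done with the ICI bit in CCR.
 * CCR may only be modified while running from the uncached P2 segment,
 * hence the jump_to_P2()/back_to_P1() bracket below.
 */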
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	back_to_P1();
	local_irq_restore(flags);
}

void flush_cache_all(void)
{
	if (cpu_data->dcache.ways == 1)
		__flush_dcache_all();
	else
		__flush_dcache_all_ex();
	flush_icache_all();
}

void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	/*
	 * FIXME: Really, the optimal solution here would be to be able to
	 * flush out individual lines created by the specified context, but
	 * this isn't feasible for a number of architectures (such as MIPS,
	 * and some SPARC)... is this possible for SuperH?
	 *
	 * In the meantime, we'll just flush all of the caches... this
	 * seems to be the simplest way to avoid at least a few wasted
	 * cache flushes. -Lethal
	 */
	flush_cache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;

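	/*
	 * The page can live in the cache at two colour positions: the one
	 * implied by the user address and the one implied by its physical
	 * (P1) address.  Both must be flushed when they differ.
	 */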
	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & CACHE_ALIAS) {
		/* Flush the position matching the user-space colour */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
			phys);
		/* Flush the position matching the kernel (P1) colour */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
			phys);
	}

	if (vma->vm_flags & VM_EXEC)
		/* Flush the matching 4K (half) of the I-cache */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
			phys);
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long p = start & PAGE_MASK;
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;

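	/*
	 * Walk the page tables, recording in bits 0-3 of 'd' which of the
	 * four alias colours appear among the virtual and physical
	 * addresses of the mapped pages.  Once all four have been seen
	 * (d == 0x0f) nothing more can be learned, so bail out early and
	 * flush only the colours that were actually found.
	 */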
	dir = pgd_offset(vma->vm_mm, p);
	pmd = pmd_offset(dir, p);

	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			p &= ~((1 << PMD_SHIFT) - 1);
			p += (1 << PMD_SHIFT);
			pmd++;
			continue;
		}
		pte = pte_offset_kernel(pmd, p);
		do {
			entry = *pte;
			if (pte_val(entry) & _PAGE_PRESENT) {
				phys = pte_val(entry) & PTE_PHYS_MASK;
				if ((p ^ phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS) >> 12);
					d |= 1 << ((phys & CACHE_ALIAS) >> 12);
					if (d == 0x0f)
						goto loop_exit;
				}
			}
			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);
loop_exit:
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
}