Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--	arch/sh/mm/cache-sh4.c	504
1 file changed, 79 insertions(+), 425 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 519e2d16cd06..4a2fbf2864de 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/cache-sh4.c
  *
  * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2001 - 2007 Paul Mundt
+ * Copyright (C) 2001 - 2009 Paul Mundt
  * Copyright (C) 2003 Richard Curnow
  * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
  *
@@ -15,6 +15,8 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -23,21 +25,12 @@
  * flushing. Anything exceeding this will simply flush the dcache in its
  * entirety.
  */
-#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
 static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
 
 /*
- * This is initialised here to ensure that it is not placed in the BSS. If
- * that were to happen, note that cache_init gets called before the BSS is
- * cleared, so this would get nulled out which would be hopeless.
- */
-static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
-	(void (*)(unsigned long, unsigned long))0xdeadbeef;
-
-/*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
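The function pointer deleted above relied on a deliberate link-time trick: initialising a static to a non-zero poison value keeps it out of .bss, which matters because cache_init() runs before the BSS is cleared. A minimal standalone sketch of the idea (the names and poison value are illustrative, not kernel API):

	#include <stdio.h>

	/*
	 * A zero-initialised static would land in .bss and be wiped by the
	 * early BSS clear; a non-zero initialiser forces it into .data.  An
	 * early caller then traps on the poison value instead of silently
	 * jumping through NULL.
	 */
	static void (*flush_fn)(unsigned long, unsigned long) =
		(void (*)(unsigned long, unsigned long))0xdeadbeef;

	static void real_flush(unsigned long start, unsigned long len)
	{
		printf("flush %#lx..%#lx\n", start, start + len);
	}

	int main(void)
	{
		flush_fn = real_flush;		/* what cache init would do */
		flush_fn(0x0UL, 0x4000UL);
		return 0;
	}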
@@ -94,15 +87,16 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
 	unsigned long flags, exec_offset = 0;
 
 	/*
-	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
-	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
+	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
+	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
 	 */
 	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
-		exec_offset = 0x20000000;
+		exec_offset = cached_to_uncached;
 
 	local_irq_save(flags);
-	__flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
+	__flush_cache_one(start | SH_CACHE_ASSOC,
+			  virt_to_phys(phys), exec_offset);
 	local_irq_restore(flags);
 }
 
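The exec_offset argument exists because the CPU must not be fetching instructions through the cached window while the cache address arrays are written. On the legacy 29-bit parts, the uncached P2 mirror sits a fixed 0x20000000 above the cached P1 window, which is exactly the distance the cached_to_uncached symbol generalises. A small sketch of that address arithmetic, assuming the classic 29-bit segment layout:

	#include <stdio.h>

	/* Classic 29-bit SH-4 segment bases (assumed for illustration). */
	#define P1SEG	0x80000000UL	/* cached identity-mapped window */
	#define P2SEG	0xa0000000UL	/* uncached mirror of the same RAM */

	int main(void)
	{
		unsigned long cached_to_uncached = P2SEG - P1SEG; /* 0x20000000 */
		unsigned long pc = 0x8c0012a0UL; /* hypothetical cached text address */

		/* Branching to pc + cached_to_uncached executes the same code
		 * through the uncached mirror, as __flush_cache_one() requires. */
		printf("cached PC %#lx -> uncached PC %#lx\n",
		       pc, pc + cached_to_uncached);
		return 0;
	}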
@@ -121,13 +115,13 @@ static void sh4_flush_dcache_page(void *arg)
 	else
 #endif
 	{
-		unsigned long phys = PHYSADDR(page_address(page));
+		unsigned long phys = page_to_phys(page);
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
 
 		/* Loop all the D-cache */
 		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+		for (i = 0; i <= n; i++, addr += PAGE_SIZE)
 			flush_cache_one(addr, phys);
 	}
 
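This loop visits each page-sized slice of a cache way because, with a virtually indexed D-cache, one physical page can be resident at every alias position. A worked example of the basic enumeration, assuming a 16 KiB way and 4 KiB pages (n_aliases = 4):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed geometry: 16 KiB per way, 4 KiB pages. */
		unsigned long way_size = 16 * 1024, page_size = 4 * 1024;
		unsigned long n_aliases = way_size / page_size;	/* 4 */
		unsigned long oc_array = 0xf4000000UL; /* CACHE_OC_ADDRESS_ARRAY */
		unsigned long addr = oc_array;
		unsigned long i;

		/* One flush_cache_one() call per alias position of the page. */
		for (i = 0; i < n_aliases; i++, addr += page_size)
			printf("flush_cache_one(%#lx, phys)\n", addr);
		return 0;
	}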
@@ -156,10 +150,27 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	local_irq_restore(flags);
 }
 
-static inline void flush_dcache_all(void)
+static void flush_dcache_all(void)
 {
-	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
-	wmb();
+	unsigned long addr, end_addr, entry_offset;
+
+	end_addr = CACHE_OC_ADDRESS_ARRAY +
+		(current_cpu_data.dcache.sets <<
+		 current_cpu_data.dcache.entry_shift) *
+			current_cpu_data.dcache.ways;
+
+	entry_offset = 1 << current_cpu_data.dcache.entry_shift;
+
+	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+	}
 }
 
 static void sh4_flush_cache_all(void *unused)
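The open-coded flush_dcache_all() walks the operand-cache address array, writing zero to each entry to clear the valid and dirty bits; the body is unrolled to eight writes per iteration. A worked example of the arithmetic, assuming a 32 KiB, 2-way cache with 32-byte lines (512 sets, entry_shift = 5):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed geometry: 32 KiB, 2-way, 32-byte lines => 512 sets. */
		unsigned long oc_array = 0xf4000000UL; /* CACHE_OC_ADDRESS_ARRAY */
		unsigned long sets = 512, ways = 2, entry_shift = 5;
		unsigned long entry_offset = 1UL << entry_shift;	/* 32 */
		unsigned long end_addr = oc_array + (sets << entry_shift) * ways;

		/* One address-array write per cache line; the kernel loop
		 * above performs eight of these per iteration. */
		printf("%lu entries, %lu loop iterations\n",
		       (end_addr - oc_array) / entry_offset,		/* 1024 */
		       (end_addr - oc_array) / (entry_offset * 8));	/* 128 */
		return 0;
	}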
@@ -168,89 +179,13 @@ static void sh4_flush_cache_all(void *unused)
 	flush_icache_all();
 }
 
-static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
-			     unsigned long end)
-{
-	unsigned long d = 0, p = start & PAGE_MASK;
-	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
-	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
-	unsigned long select_bit;
-	unsigned long all_aliases_mask;
-	unsigned long addr_offset;
-	pgd_t *dir;
-	pmd_t *pmd;
-	pud_t *pud;
-	pte_t *pte;
-	int i;
-
-	dir = pgd_offset(mm, p);
-	pud = pud_offset(dir, p);
-	pmd = pmd_offset(pud, p);
-	end = PAGE_ALIGN(end);
-
-	all_aliases_mask = (1 << n_aliases) - 1;
-
-	do {
-		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
-			p &= PMD_MASK;
-			p += PMD_SIZE;
-			pmd++;
-
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, p);
-
-		do {
-			unsigned long phys;
-			pte_t entry = *pte;
-
-			if (!(pte_val(entry) & _PAGE_PRESENT)) {
-				pte++;
-				p += PAGE_SIZE;
-				continue;
-			}
-
-			phys = pte_val(entry) & PTE_PHYS_MASK;
-
-			if ((p ^ phys) & alias_mask) {
-				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
-				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);
-
-				if (d == all_aliases_mask)
-					goto loop_exit;
-			}
-
-			pte++;
-			p += PAGE_SIZE;
-		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
-		pmd++;
-	} while (p < end);
-
-loop_exit:
-	addr_offset = 0;
-	select_bit = 1;
-
-	for (i = 0; i < n_aliases; i++) {
-		if (d & select_bit) {
-			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
-			wmb();
-		}
-
-		select_bit <<= 1;
-		addr_offset += PAGE_SIZE;
-	}
-}
-
 /*
  * Note : (RPC) since the caches are physically tagged, the only point
  * of flush_cache_mm for SH-4 is to get rid of aliases from the
  * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
  * lines can stay resident so long as the virtual address they were
  * accessed with (hence cache set) is in accord with the physical
- * address (i.e. tag). It's no different here. So I reckon we don't
- * need to flush the I-cache, since aliases don't matter for that. We
- * should try that.
+ * address (i.e. tag). It's no different here.
  *
  * Caller takes mm->mmap_sem.
  */
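The comment's point about physically tagged caches is that two mappings of the same page only need flushing when their virtual addresses select different cache sets. A small sketch of the alias test the rewritten code relies on (see pages_do_alias() in the sh4_flush_cache_page hunk below; the addresses and the 16 KiB way size here are assumptions for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed: 16 KiB way, 4 KiB pages; mask as in shm_align_mask. */
		unsigned long shm_align_mask = (16 * 1024) - 1;
		unsigned long va   = 0x2000a000UL; /* page-aligned user mapping */
		unsigned long phys = 0x0c003000UL; /* the page's physical address */

		/* Mirrors pages_do_alias(): do the set-index bits differ? */
		if ((va ^ phys) & shm_align_mask)
			printf("mappings alias: D-cache flush needed\n");
		else
			printf("same sets: lines stay coherent without flushing\n");
		return 0;
	}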
@@ -261,33 +196,7 @@ static void sh4_flush_cache_mm(void *arg)
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
 
-	/*
-	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
-	 * the cache is physically tagged, the data can just be left in there.
-	 */
-	if (boot_cpu_data.dcache.n_aliases == 0)
-		return;
-
-	/*
-	 * Don't bother groveling around the dcache for the VMA ranges
-	 * if there are too many PTEs to make it worthwhile.
-	 */
-	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
-		flush_dcache_all();
-	else {
-		struct vm_area_struct *vma;
-
-		/*
-		 * In this case there are reasonably sized ranges to flush,
-		 * iterate through the VMA list and take care of any aliases.
-		 */
-		for (vma = mm->mmap; vma; vma = vma->vm_next)
-			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
-	}
-
-	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
-	if (mm->exec_vm)
-		flush_icache_all();
+	flush_dcache_all();
 }
 
 /*
@@ -300,44 +209,63 @@ static void sh4_flush_cache_page(void *args)
 {
 	struct flusher_data *data = args;
 	struct vm_area_struct *vma;
+	struct page *page;
 	unsigned long address, pfn, phys;
-	unsigned int alias_mask;
+	int map_coherent = 0;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	void *vaddr;
 
 	vma = data->vma;
-	address = data->addr1;
+	address = data->addr1 & PAGE_MASK;
 	pfn = data->addr2;
 	phys = pfn << PAGE_SHIFT;
+	page = pfn_to_page(pfn);
 
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
-	alias_mask = boot_cpu_data.dcache.alias_mask;
-
-	/* We only need to flush D-cache when we have alias */
-	if ((address^phys) & alias_mask) {
-		/* Loop 4K of the D-cache */
-		flush_cache_one(
-			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
-			phys);
-		/* Loop another 4K of the D-cache */
-		flush_cache_one(
-			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
-			phys);
-	}
+	pgd = pgd_offset(vma->vm_mm, address);
+	pud = pud_offset(pgd, address);
+	pmd = pmd_offset(pud, address);
+	pte = pte_offset_kernel(pmd, address);
+
+	/* If the page isn't present, there is nothing to do here. */
+	if (!(pte_val(*pte) & _PAGE_PRESENT))
+		return;
 
-	alias_mask = boot_cpu_data.icache.alias_mask;
-	if (vma->vm_flags & VM_EXEC) {
-		/*
-		 * Evict entries from the portion of the cache from which code
-		 * may have been executed at this address (virtual).  There's
-		 * no need to evict from the portion corresponding to the
-		 * physical address as for the D-cache, because we know the
-		 * kernel has never executed the code through its identity
-		 * translation.
-		 */
-		flush_cache_one(
-			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
-			phys);
+	if ((vma->vm_mm == current->active_mm))
+		vaddr = NULL;
+	else {
+		/*
+		 * Use kmap_coherent or kmap_atomic to do flushes for
+		 * another ASID than the current one.
+		 */
+		map_coherent = (current_cpu_data.dcache.n_aliases &&
+			!test_bit(PG_dcache_dirty, &page->flags) &&
+			page_mapped(page));
+		if (map_coherent)
+			vaddr = kmap_coherent(page, address);
+		else
+			vaddr = kmap_atomic(page, KM_USER0);
+
+		address = (unsigned long)vaddr;
+	}
+
+	if (pages_do_alias(address, phys))
+		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+			(address & shm_align_mask), phys);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_all();
+
+	if (vaddr) {
+		if (map_coherent)
+			kunmap_coherent(vaddr);
+		else
+			kunmap_atomic(vaddr, KM_USER0);
 	}
 }
 
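When the page being flushed belongs to an mm other than the current one, its user virtual address is not mapped under the current ASID, so the rewritten code creates a temporary kernel mapping first. kmap_coherent() yields a mapping with the same cache colour as the user address and is preferred whenever the page's dcache state allows it. The selection logic in isolation (the booleans are stand-ins for the kernel predicates):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		/* Stand-ins for the conditions tested in sh4_flush_cache_page(). */
		bool has_aliases  = true;  /* current_cpu_data.dcache.n_aliases */
		bool dcache_dirty = false; /* test_bit(PG_dcache_dirty, ...) */
		bool mapped       = true;  /* page_mapped(page) */

		/* A colour-matched kmap_coherent() mapping is only safe while
		 * the page has no dirty aliases outstanding; otherwise fall
		 * back to a plain kmap_atomic() mapping. */
		bool map_coherent = has_aliases && !dcache_dirty && mapped;

		printf("use %s\n", map_coherent ? "kmap_coherent" : "kmap_atomic");
		return 0;
	}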
@@ -370,24 +298,10 @@ static void sh4_flush_cache_range(void *args)
 	if (boot_cpu_data.dcache.n_aliases == 0)
 		return;
 
-	/*
-	 * Don't bother with the lookup and alias check if we have a
-	 * wide range to cover, just blow away the dcache in its
-	 * entirety instead. -- PFM.
-	 */
-	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
-		flush_dcache_all();
-	else
-		__flush_cache_mm(vma->vm_mm, start, end);
+	flush_dcache_all();
 
-	if (vma->vm_flags & VM_EXEC) {
-		/*
-		 * TODO: Is this required???  Need to look at how I-cache
-		 * coherency is assured when new programs are loaded to see if
-		 * this matters.
-		 */
+	if (vma->vm_flags & VM_EXEC)
 		flush_icache_all();
-	}
 }
 
 /**
@@ -461,245 +375,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
 	} while (--way_count != 0);
 }
 
-/*
- * Break the 1, 2 and 4 way variants of this out into separate functions to
- * avoid nearly all the overhead of having the conditional stuff in the function
- * bodies (+ the 1 and 2 way cases avoid saving any registers too).
- *
- * We want to eliminate unnecessary bus transactions, so this code uses
- * a non-obvious technique.
- *
- * Loop over a cache way sized block, one cache line at a time. For each
- * line, use movca.l to cause the current cache line contents to be written
- * back, but without reading anything from main memory. However this has the
- * side effect that the cache is now caching that memory location. So follow
- * this with a cache invalidate to mark the cache line invalid. And do all
- * this with interrupts disabled, to avoid the cache line being accidentally
- * evicted while it is holding garbage.
- *
- * This also breaks in a number of circumstances:
- * - if there are modifications to the region of memory just above
- *   empty_zero_page (for example because a breakpoint has been placed
- *   there), then these can be lost.
- *
- *   This is because the memory address which the cache temporarily
- *   caches in the above description is empty_zero_page. So the
- *   movca.l hits the cache (it is assumed that it misses, or at least
- *   isn't dirty), modifies the line and then invalidates it, losing the
- *   required change.
- *
- * - If caches are disabled or configured in write-through mode, then
- *   the movca.l writes garbage directly into memory.
- */
-static void __flush_dcache_segment_writethrough(unsigned long start,
-						unsigned long extent_per_way)
-{
-	unsigned long addr;
-	int i;
-
-	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
-
-	while (extent_per_way) {
-		for (i = 0; i < cpu_data->dcache.ways; i++)
-			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);
-
-		addr += cpu_data->dcache.linesz;
-		extent_per_way -= cpu_data->dcache.linesz;
-	}
-}
-
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent_per_way)
-{
-	unsigned long orig_sr, sr_with_bl;
-	unsigned long base_addr;
-	unsigned long way_incr, linesz, way_size;
-	struct cache_info *dcache;
-	register unsigned long a0, a0e;
-
-	asm volatile("stc sr, %0" : "=r" (orig_sr));
-	sr_with_bl = orig_sr | (1<<28);
-	base_addr = ((unsigned long)&empty_zero_page[0]);
-
-	/*
-	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
-	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
-	 * aligned to any better than the cache line size (which it will be
-	 * anyway by construction), let's align it to at least the way_size of
-	 * any existing or conceivable SH-4 D-cache.  -- RPC
-	 */
-	base_addr = ((base_addr >> 16) << 16);
-	base_addr |= start;
-
-	dcache = &boot_cpu_data.dcache;
-	linesz = dcache->linesz;
-	way_incr = dcache->way_incr;
-	way_size = dcache->way_size;
-
-	a0 = base_addr;
-	a0e = base_addr + extent_per_way;
-	do {
-		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		a0 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		a0 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		a0 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		asm volatile("ldc %0, sr" : : "r" (orig_sr));
-		a0 += linesz;
-	} while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent_per_way)
-{
-	unsigned long orig_sr, sr_with_bl;
-	unsigned long base_addr;
-	unsigned long way_incr, linesz, way_size;
-	struct cache_info *dcache;
-	register unsigned long a0, a1, a0e;
-
-	asm volatile("stc sr, %0" : "=r" (orig_sr));
-	sr_with_bl = orig_sr | (1<<28);
-	base_addr = ((unsigned long)&empty_zero_page[0]);
-
-	/* See comment under 1-way above */
-	base_addr = ((base_addr >> 16) << 16);
-	base_addr |= start;
-
-	dcache = &boot_cpu_data.dcache;
-	linesz = dcache->linesz;
-	way_incr = dcache->way_incr;
-	way_size = dcache->way_size;
-
-	a0 = base_addr;
-	a1 = a0 + way_incr;
-	a0e = base_addr + extent_per_way;
-	do {
-		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		a0 += linesz;
-		a1 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		a0 += linesz;
-		a1 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		a0 += linesz;
-		a1 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		asm volatile("ldc %0, sr" : : "r" (orig_sr));
-		a0 += linesz;
-		a1 += linesz;
-	} while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent_per_way)
-{
-	unsigned long orig_sr, sr_with_bl;
-	unsigned long base_addr;
-	unsigned long way_incr, linesz, way_size;
-	struct cache_info *dcache;
-	register unsigned long a0, a1, a2, a3, a0e;
-
-	asm volatile("stc sr, %0" : "=r" (orig_sr));
-	sr_with_bl = orig_sr | (1<<28);
-	base_addr = ((unsigned long)&empty_zero_page[0]);
-
-	/* See comment under 1-way above */
-	base_addr = ((base_addr >> 16) << 16);
-	base_addr |= start;
-
-	dcache = &boot_cpu_data.dcache;
-	linesz = dcache->linesz;
-	way_incr = dcache->way_incr;
-	way_size = dcache->way_size;
-
-	a0 = base_addr;
-	a1 = a0 + way_incr;
-	a2 = a1 + way_incr;
-	a3 = a2 + way_incr;
-	a0e = base_addr + extent_per_way;
-	do {
-		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		asm volatile("ldc %0, sr" : : "r" (orig_sr));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-	} while (a0 < a0e);
-}
-
 extern void __weak sh4__flush_region_init(void);
 
 /*
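For reference, the movca.l/ocbi pairing described in the removed comment reduces to the per-line pattern below. This is a sketch distilled from the removed 1-way variant (SH-4 assembly only, with the SR.BL interrupt blocking elided):

	static void flush_way_sketch(unsigned long start, unsigned long extent,
				     unsigned long linesz)
	{
		unsigned long a0 = start, a0e = start + extent;

		do {
			/* movca.l dirties the line without reading memory,
			 * forcing write-back of whatever the set held; ocbi
			 * then invalidates it before the bogus store can be
			 * observed. */
			__asm__ __volatile__("movca.l r0, @%0\n\t"
					     "ocbi @%0"
					     : : "r" (a0) : "memory");
			a0 += linesz;
		} while (a0 < a0e);
	}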
@@ -707,32 +382,11 @@ extern void __weak sh4__flush_region_init(void);
  */
 void __init sh4_cache_init(void)
 {
-	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
-
 	printk("PVR=%08x CVR=%08x PRR=%08x\n",
 		ctrl_inl(CCN_PVR),
 		ctrl_inl(CCN_CVR),
 		ctrl_inl(CCN_PRR));
 
-	if (wt_enabled)
-		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
-	else {
-		switch (boot_cpu_data.dcache.ways) {
-		case 1:
-			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-			break;
-		case 2:
-			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-			break;
-		case 4:
-			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-			break;
-		default:
-			panic("unknown number of cache ways\n");
-			break;
-		}
-	}
-
 	local_flush_icache_range	= sh4_flush_icache_range;
 	local_flush_dcache_page		= sh4_flush_dcache_page;
 	local_flush_cache_all		= sh4_flush_cache_all;
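With the per-way segment flushers gone, sh4_cache_init() only prints the version registers and installs the SH-4 flushers into the generic dispatch pointers. The registration pattern, sketched standalone (the structure and names are simplified stand-ins for the kernel's):

	#include <stdio.h>

	struct flusher_data {
		void *vma;		/* stand-in for struct vm_area_struct * */
		unsigned long addr1, addr2;
	};

	/* Generic entry points dispatch through per-CPU-family pointers
	 * that each family's cache_init() installs at boot. */
	static void (*local_flush_dcache_page)(void *args);

	static void sh4_flush_dcache_page(void *args)
	{
		struct flusher_data *data = args;
		printf("SH-4 dcache flush for page at %#lx\n", data->addr1);
	}

	int main(void)
	{
		struct flusher_data data = { NULL, 0x10000UL, 0 };

		local_flush_dcache_page = sh4_flush_dcache_page; /* init */
		local_flush_dcache_page(&data);
		return 0;
	}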